repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/rh_ip.py
_read_file
python
def _read_file(path):
    '''
    Read the file at ``path`` and return its contents as a list of
    unicode lines.

    The first empty line encountered (if any) is discarded.  Any failure
    to open, read, or decode the file yields an empty list instead of
    raising, so callers can treat a missing file like an empty one.
    '''
    try:
        with salt.utils.files.fopen(path, 'rb') as rfh:
            contents = salt.utils.stringutils.to_unicode(rfh.read())
            result = contents.splitlines()
            # NOTE(review): this drops only the *first* empty line;
            # subsequent blank lines are preserved (matches historical
            # behavior -- callers skip blanks themselves).
            if '' in result:
                result.remove('')
            return result
    except Exception:  # pylint: disable=broad-except
        # Best-effort read: unreadable or absent files -> empty list.
        return []
Reads and returns the contents of a file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L933-L946
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
_write_file_iface
python
def _write_file_iface(iface, data, folder, pattern):
    '''
    Write ``data`` to the per-interface config file named by expanding
    ``pattern`` with ``iface``, inside ``folder``.

    Raises an ``AttributeError`` (matching the module's ``_raise_error_*``
    convention) when ``folder`` does not exist, so callers fail loudly
    instead of silently dropping network configuration.
    '''
    target = os.path.join(folder, pattern.format(iface))
    # Fail fast with a descriptive message rather than letting open()
    # raise a bare OSError for a missing script directory.
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'.format(
            target, folder)
        log.error(msg)
        raise AttributeError(msg)
    # fopen sets CLOEXEC on the fd; to_str normalizes unicode/bytes for
    # the running Python version before writing.
    with salt.utils.files.fopen(target, 'w') as handle:
        handle.write(salt.utils.stringutils.to_str(data))
Writes a file to disk
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L949-L960
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: 
disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
_write_file_network
python
def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data))
Writes a file to disk
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L963-L968
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: 
disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
build_bond
python
def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    iface
        Name of the bond interface (e.g. ``bond0``); also used as the
        basename of the generated ``<iface>.conf`` file.

    settings
        Bonding options (``mode``, ``miimon``, ...) validated by
        ``_parse_settings_bond``. The optional ``test`` kwarg returns the
        rendered config without writing it.

    Returns the lines of the generated (or, under ``test``, the rendered
    in-memory) configuration, or ``''`` if the template cannot be loaded.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    # Major RHEL release drives the legacy modprobe.conf handling below.
    rh_major = __grains__['osrelease'][:1]

    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})
    _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
    if rh_major == '5':
        # RHEL 5 uses the monolithic /etc/modprobe.conf: purge any stale
        # alias/options lines for this interface before appending ours.
        __salt__['cmd.run'](
            'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['cmd.run'](
            'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface),
            python_shell=False
        )
        __salt__['file.append']('/etc/modprobe.conf', path)
    __salt__['kmod.load']('bonding')

    # Use .get() so a missing 'test' kwarg does not raise KeyError;
    # build_interface guards the same way ('test' in settings and ...).
    if settings.get('test'):
        return _read_temp(data)

    return _read_file(path)
Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L980-L1017
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n", "def _parse_settings_bond(opts, iface):\n '''\n Filters given options and outputs valid settings for requested\n operation. If an option has a value that is not expected, this\n function will log what the Interface, Setting and what it was\n expecting.\n '''\n\n bond_def = {\n # 803.ad aggregation selection logic\n # 0 for stable (default)\n # 1 for bandwidth\n # 2 for count\n 'ad_select': '0',\n # Max number of transmit queues (default = 16)\n 'tx_queues': '16',\n # Link monitoring in milliseconds. Most NICs support this\n 'miimon': '100',\n # ARP interval in milliseconds\n 'arp_interval': '250',\n # Delay before considering link down in milliseconds (miimon * 2)\n 'downdelay': '200',\n # lacp_rate 0: Slow - every 30 seconds\n # lacp_rate 1: Fast - every 1 second\n 'lacp_rate': '0',\n # Max bonds for this driver\n 'max_bonds': '1',\n # Specifies the time, in milliseconds, to wait before\n # enabling a slave after a link recovery has been\n # detected. Only used with miimon.\n 'updelay': '0',\n # Used with miimon.\n # On: driver sends mii\n # Off: ethtool sends mii\n 'use_carrier': '0',\n # Default. 
Don't change unless you know what you are doing.\n 'xmit_hash_policy': 'layer2',\n }\n\n if opts['mode'] in ['balance-rr', '0']:\n log.info(\n 'Device: %s Bonding Mode: load balancing (round-robin)',\n iface\n )\n return _parse_settings_bond_0(opts, iface, bond_def)\n elif opts['mode'] in ['active-backup', '1']:\n log.info(\n 'Device: %s Bonding Mode: fault-tolerance (active-backup)',\n iface\n )\n return _parse_settings_bond_1(opts, iface, bond_def)\n elif opts['mode'] in ['balance-xor', '2']:\n log.info(\n 'Device: %s Bonding Mode: load balancing (xor)',\n iface\n )\n return _parse_settings_bond_2(opts, iface, bond_def)\n elif opts['mode'] in ['broadcast', '3']:\n log.info(\n 'Device: %s Bonding Mode: fault-tolerance (broadcast)',\n iface\n )\n return _parse_settings_bond_3(opts, iface, bond_def)\n elif opts['mode'] in ['802.3ad', '4']:\n log.info(\n 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link '\n 'aggregation', iface\n )\n return _parse_settings_bond_4(opts, iface, bond_def)\n elif opts['mode'] in ['balance-tlb', '5']:\n log.info(\n 'Device: %s Bonding Mode: transmit load balancing', iface\n )\n return _parse_settings_bond_5(opts, iface, bond_def)\n elif opts['mode'] in ['balance-alb', '6']:\n log.info(\n 'Device: %s Bonding Mode: adaptive load balancing', iface\n )\n return _parse_settings_bond_6(opts, iface, bond_def)\n else:\n valid = [\n '0', '1', '2', '3', '4', '5', '6',\n 'balance-rr', 'active-backup', 'balance-xor',\n 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb'\n ]\n _raise_error_iface(iface, 'mode', valid)\n", "def _read_temp(data):\n lines = data.splitlines()\n try: # Discard newlines if they exist\n lines.remove('')\n except ValueError:\n pass\n return lines\n", "def _write_file_iface(iface, data, folder, pattern):\n '''\n Writes a file to disk\n '''\n filename = os.path.join(folder, pattern.format(iface))\n if not os.path.exists(folder):\n msg = '{0} cannot be written. 
{1} does not exist'\n msg = msg.format(filename, folder)\n log.error(msg)\n raise AttributeError(msg)\n with salt.utils.files.fopen(filename, 'w') as fp_:\n fp_.write(salt.utils.stringutils.to_str(data))\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
build_interface
python
def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path)
Build an interface script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_interface eth0 eth <settings>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1020-L1074
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n", "def _raise_error_iface(iface, option, expected):\n '''\n Log and raise an error with a logical formatted message.\n '''\n msg = _error_msg_iface(iface, option, expected)\n log.error(msg)\n raise AttributeError(msg)\n", "def _parse_settings_eth(opts, iface_type, enabled, iface):\n '''\n Filters given options and outputs valid settings for a\n network interface.\n '''\n result = {'name': iface}\n if 'proto' in opts:\n valid = ['none', 'bootp', 'dhcp']\n if opts['proto'] in valid:\n result['proto'] = opts['proto']\n else:\n _raise_error_iface(iface, opts['proto'], valid)\n\n if 'dns' in opts:\n result['dns'] = opts['dns']\n result['peerdns'] = 'yes'\n\n if 'mtu' in opts:\n try:\n result['mtu'] = int(opts['mtu'])\n except ValueError:\n _raise_error_iface(iface, 'mtu', ['integer'])\n\n if iface_type not in ['bridge']:\n ethtool = _parse_ethtool_opts(opts, iface)\n if ethtool:\n result['ethtool'] = ethtool\n\n if iface_type == 'slave':\n result['proto'] = 'none'\n\n if iface_type == 'bond':\n bonding = _parse_settings_bond(opts, iface)\n if bonding:\n result['bonding'] = bonding\n result['devtype'] = \"Bond\"\n\n if iface_type == 'vlan':\n vlan = _parse_settings_vlan(opts, iface)\n if vlan:\n result['devtype'] = \"Vlan\"\n for opt in vlan:\n result[opt] = opts[opt]\n\n if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']:\n auto_addr = False\n if 'addr' in opts:\n if salt.utils.validate.net.mac(opts['addr']):\n result['addr'] = opts['addr']\n elif opts['addr'] == 'auto':\n auto_addr = True\n elif opts['addr'] != 'none':\n _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none'])\n else:\n auto_addr = 
True\n\n if auto_addr:\n # If interface type is slave for bond, not setting hwaddr\n if iface_type != 'slave':\n ifaces = __salt__['network.interfaces']()\n if iface in ifaces and 'hwaddr' in ifaces[iface]:\n result['addr'] = ifaces[iface]['hwaddr']\n if iface_type == 'eth':\n result['devtype'] = 'Ethernet'\n if iface_type == 'bridge':\n result['devtype'] = 'Bridge'\n bypassfirewall = True\n valid = _CONFIG_TRUE + _CONFIG_FALSE\n for opt in ['bypassfirewall']:\n if opt in opts:\n if opts[opt] in _CONFIG_TRUE:\n bypassfirewall = True\n elif opts[opt] in _CONFIG_FALSE:\n bypassfirewall = False\n else:\n _raise_error_iface(iface, opts[opt], valid)\n\n bridgectls = [\n 'net.bridge.bridge-nf-call-ip6tables',\n 'net.bridge.bridge-nf-call-iptables',\n 'net.bridge.bridge-nf-call-arptables',\n ]\n\n if bypassfirewall:\n sysctl_value = 0\n else:\n sysctl_value = 1\n\n for sysctl in bridgectls:\n try:\n __salt__['sysctl.persist'](sysctl, sysctl_value)\n except CommandExecutionError:\n log.warning('Failed to set sysctl: %s', sysctl)\n\n else:\n if 'bridge' in opts:\n result['bridge'] = opts['bridge']\n\n if iface_type == 'ipip':\n result['devtype'] = 'IPIP'\n for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']:\n if opt not in opts:\n _raise_error_iface(iface, opts[opt], ['1.2.3.4'])\n else:\n result[opt] = opts[opt]\n if iface_type == 'ib':\n result['devtype'] = 'InfiniBand'\n\n if 'prefix' in opts:\n if 'netmask' in opts:\n msg = 'Cannot use prefix and netmask together'\n log.error(msg)\n raise AttributeError(msg)\n result['prefix'] = opts['prefix']\n elif 'netmask' in opts:\n result['netmask'] = opts['netmask']\n\n for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']:\n if opt in opts:\n result[opt] = opts[opt]\n\n for opt in ['ipv6addr', 'ipv6gateway']:\n if opt in opts:\n result[opt] = opts[opt]\n\n if 'ipaddrs' in opts:\n result['ipaddrs'] = []\n for opt in opts['ipaddrs']:\n if salt.utils.validate.net.ipv4_addr(opt):\n ip, 
prefix = [i.strip() for i in opt.split('/')]\n result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix})\n else:\n msg = 'ipv4 CIDR is invalid'\n log.error(msg)\n raise AttributeError(msg)\n\n if 'ipv6addrs' in opts:\n for opt in opts['ipv6addrs']:\n if not salt.utils.validate.net.ipv6_addr(opt):\n msg = 'ipv6 CIDR is invalid'\n log.error(msg)\n raise AttributeError(msg)\n result['ipv6addrs'] = opts['ipv6addrs']\n\n if 'enable_ipv6' in opts:\n result['enable_ipv6'] = opts['enable_ipv6']\n\n valid = _CONFIG_TRUE + _CONFIG_FALSE\n for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns',\n 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']:\n if opt in opts:\n if opts[opt] in _CONFIG_TRUE:\n result[opt] = 'yes'\n elif opts[opt] in _CONFIG_FALSE:\n result[opt] = 'no'\n else:\n _raise_error_iface(iface, opts[opt], valid)\n\n if 'onboot' in opts:\n log.warning(\n 'The \\'onboot\\' option is controlled by the \\'enabled\\' option. 
'\n 'Interface: %s Enabled: %s', iface, enabled\n )\n\n if enabled:\n result['onboot'] = 'yes'\n else:\n result['onboot'] = 'no'\n\n # If the interface is defined then we want to always take\n # control away from non-root users; unless the administrator\n # wants to allow non-root users to control the device.\n if 'userctl' in opts:\n if opts['userctl'] in _CONFIG_TRUE:\n result['userctl'] = 'yes'\n elif opts['userctl'] in _CONFIG_FALSE:\n result['userctl'] = 'no'\n else:\n _raise_error_iface(iface, opts['userctl'], valid)\n else:\n result['userctl'] = 'no'\n\n # This vlan is in opts, and should be only used in range interface\n # will affect jinja template for interface generating\n if 'vlan' in opts:\n if opts['vlan'] in _CONFIG_TRUE:\n result['vlan'] = 'yes'\n elif opts['vlan'] in _CONFIG_FALSE:\n result['vlan'] = 'no'\n else:\n _raise_error_iface(iface, opts['vlan'], valid)\n\n if 'arpcheck' in opts:\n if opts['arpcheck'] in _CONFIG_FALSE:\n result['arpcheck'] = 'no'\n\n if 'ipaddr_start' in opts:\n result['ipaddr_start'] = opts['ipaddr_start']\n\n if 'ipaddr_end' in opts:\n result['ipaddr_end'] = opts['ipaddr_end']\n\n if 'clonenum_start' in opts:\n result['clonenum_start'] = opts['clonenum_start']\n\n # If NetworkManager is available, we can control whether we use\n # it or not\n if 'nm_controlled' in opts:\n if opts['nm_controlled'] in _CONFIG_TRUE:\n result['nm_controlled'] = 'yes'\n elif opts['nm_controlled'] in _CONFIG_FALSE:\n result['nm_controlled'] = 'no'\n else:\n _raise_error_iface(iface, opts['nm_controlled'], valid)\n else:\n result['nm_controlled'] = 'no'\n\n return result\n", "def _read_temp(data):\n lines = data.splitlines()\n try: # Discard newlines if they exist\n lines.remove('')\n except ValueError:\n pass\n return lines\n", "def _write_file_iface(iface, data, folder, pattern):\n '''\n Writes a file to disk\n '''\n filename = os.path.join(folder, pattern.format(iface))\n if not os.path.exists(folder):\n msg = '{0} cannot be written. 
{1} does not exist'\n msg = msg.format(filename, folder)\n log.error(msg)\n raise AttributeError(msg)\n with salt.utils.files.fopen(filename, 'w') as fp_:\n fp_.write(salt.utils.stringutils.to_str(data))\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
build_routes
python
def build_routes(iface, **settings):
    '''
    Build a route script for a network interface.

    iface
        Name of the interface the routes belong to (e.g. ``eth0``).

    settings
        Must contain a ``routes`` key (a list of route dicts, each with at
        least an ``ipaddr``); may contain ``test`` to render without writing.

    Returns the rendered route file contents as a list of lines
    (IPv4 lines followed by IPv6 lines).

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_routes eth0 <settings>
    '''
    template = 'rh6_route_eth.jinja'
    try:
        # Compare the full major version. The previous form,
        # int(__grains__['osrelease'][0]), looked at only the first
        # character, so e.g. osrelease '10.1' parsed as 1 and wrongly
        # selected the pre-RHEL6 template.
        if int(__grains__['osrelease'].split('.')[0]) < 6:
            template = 'route_eth.jinja'
    except ValueError:
        # Non-numeric osrelease: keep the modern template.
        pass
    log.debug('Template name: %s', template)
    opts = _parse_routes(iface, settings)
    log.debug('Opts: \n %s', opts)
    try:
        template = JINJA.get_template(template)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template %s', template)
        return ''

    # IPv4 and IPv6 routes live in separate files (route-IFACE vs
    # route6-IFACE), so split them before rendering.
    opts6 = []
    opts4 = []
    for route in opts['routes']:
        ipaddr = route['ipaddr']
        if salt.utils.validate.net.ipv6_addr(ipaddr):
            opts6.append(route)
        else:
            opts4.append(route)
    log.debug("IPv4 routes:\n%s", opts4)
    log.debug("IPv6 routes:\n%s", opts6)

    routecfg = template.render(routes=opts4, iface=iface)
    routecfg6 = template.render(routes=opts6, iface=iface)
    # Use .get() so a missing 'test' key means "not a test run" instead of
    # raising KeyError.
    if settings.get('test'):
        routes = _read_temp(routecfg)
        routes.extend(_read_temp(routecfg6))
        return routes

    _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}')
    _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}')

    path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface))
    path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface))

    routes = _read_file(path)
    routes.extend(_read_file(path6))
    return routes
Build a route script for a network interface. CLI Example: .. code-block:: bash salt '*' ip.build_routes eth0 <settings>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1077-L1130
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n", "def ipv6_addr(addr):\n '''\n Returns True if the IPv6 address (and optional subnet) are valid, otherwise\n returns False.\n '''\n return __ip_addr(addr, socket.AF_INET6)\n", "def _parse_routes(iface, opts):\n '''\n Filters given options and outputs valid settings for\n the route settings file.\n '''\n # Normalize keys\n opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))\n result = {}\n if 'routes' not in opts:\n _raise_error_routes(iface, 'routes', 'List of routes')\n\n for opt in opts:\n result[opt] = opts[opt]\n\n return result\n", "def _read_temp(data):\n lines = data.splitlines()\n try: # Discard newlines if they exist\n lines.remove('')\n except ValueError:\n pass\n return lines\n", "def _write_file_iface(iface, data, folder, pattern):\n '''\n Writes a file to disk\n '''\n filename = os.path.join(folder, pattern.format(iface))\n if not os.path.exists(folder):\n msg = '{0} cannot be written. {1} does not exist'\n msg = msg.format(filename, folder)\n log.error(msg)\n raise AttributeError(msg)\n with salt.utils.files.fopen(filename, 'w') as fp_:\n fp_.write(salt.utils.stringutils.to_str(data))\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
# NOTE(review): the next statement is the tail of _parse_settings_bond(),
# whose body starts on an earlier collapsed line; it raises when the bond
# 'mode' option matched none of the supported modes listed in ``valid``.
        _raise_error_iface(iface, 'mode', valid)


def _parse_settings_bond_0(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond0.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''

    # balance-rr shares miimon settings with balance-xor
    bond = _parse_settings_bond_1(opts, iface, bond_def)
    bond.update({'mode': '0'})

    # ARP targets in n.n.n.n form
    valid = ['list of ips (up to 16)']
    if 'arp_ip_target' in opts:
        if isinstance(opts['arp_ip_target'], list):
            if 1 <= len(opts['arp_ip_target']) <= 16:
                bond.update({'arp_ip_target': ''})
                # Join the target IPs into a single comma-separated string.
                for ip in opts['arp_ip_target']:  # pylint: disable=C0103
                    if bond['arp_ip_target']:
                        bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip
                    else:
                        bond['arp_ip_target'] = ip
            else:
                _raise_error_iface(iface, 'arp_ip_target', valid)
        else:
            _raise_error_iface(iface, 'arp_ip_target', valid)
    elif 'miimon' not in opts:
        # Without miimon, ARP monitoring targets become mandatory.
        _raise_error_iface(iface, 'arp_ip_target', valid)

    if 'arp_interval' in opts:
        try:
            # Validate that the value parses as an integer; the original
            # (possibly string) value is what gets stored.
            int(opts['arp_interval'])
            bond.update({'arp_interval': opts['arp_interval']})
        except Exception:
            _raise_error_iface(iface, 'arp_interval', ['integer'])
    else:
        _log_default_iface(iface, 'arp_interval', bond_def['arp_interval'])
        bond.update({'arp_interval': bond_def['arp_interval']})

    return bond


def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond1.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
# NOTE(review): the lines below complete _parse_settings_bond_6(), whose
# ``def`` line and docstring opener sit on the previous collapsed line.
    '''
    bond = {'mode': '6'}

    # Link-monitoring timings: validate as integers, else fall back to the
    # driver defaults carried in ``bond_def``.
    for binding in ['miimon', 'downdelay', 'updelay']:
        if binding in opts:
            try:
                int(opts[binding])
                bond.update({binding: opts[binding]})
            except Exception:
                _raise_error_iface(iface, binding, ['integer'])
        else:
            _log_default_iface(iface, binding, bond_def[binding])
            bond.update({binding: bond_def[binding]})

    if 'use_carrier' in opts:
        # Normalize the truthy/falsy spellings to the driver's '1'/'0'.
        if opts['use_carrier'] in _CONFIG_TRUE:
            bond.update({'use_carrier': '1'})
        elif opts['use_carrier'] in _CONFIG_FALSE:
            bond.update({'use_carrier': '0'})
        else:
            valid = _CONFIG_TRUE + _CONFIG_FALSE
            _raise_error_iface(iface, 'use_carrier', valid)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond.update({'use_carrier': bond_def['use_carrier']})

    if 'primary' in opts:
        bond.update({'primary': opts['primary']})

    return bond


def _parse_settings_vlan(opts, iface):
    '''
    Filters given options and outputs valid settings for a vlan
    '''
    vlan = {}
    if 'reorder_hdr' in opts:
        if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE:
            vlan.update({'reorder_hdr': opts['reorder_hdr']})
        else:
            valid = _CONFIG_TRUE + _CONFIG_FALSE
            _raise_error_iface(iface, 'reorder_hdr', valid)

    if 'vlan_id' in opts:
        if opts['vlan_id'] > 0:
            vlan.update({'vlan_id': opts['vlan_id']})
        else:
            _raise_error_iface(iface, 'vlan_id', 'Positive integer')

    if 'phys_dev' in opts:
        if opts['phys_dev']:
            vlan.update({'phys_dev': opts['phys_dev']})
        else:
            _raise_error_iface(iface, 'phys_dev', 'Non-empty string')

    return vlan


def _parse_settings_eth(opts, iface_type, enabled, iface):
    '''
    Filters given options and outputs valid settings for
    a network interface.
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
# NOTE(review): the first lines below complete _raise_error_iface(), whose
# ``def _raise_error_iface(iface,`` opener sits on the previous collapsed line.
                       option, expected):
    '''
    Log and raise an error with a logical formatted message.
    '''
    msg = _error_msg_iface(iface, option, expected)
    log.error(msg)
    raise AttributeError(msg)


def _raise_error_network(option, expected):
    '''
    Log and raise an error with a logical formatted message.
    '''
    msg = _error_msg_network(option, expected)
    log.error(msg)
    raise AttributeError(msg)


def _raise_error_routes(iface, option, expected):
    '''
    Log and raise an error with a logical formatted message.
    '''
    msg = _error_msg_routes(iface, option, expected)
    log.error(msg)
    raise AttributeError(msg)


def _read_file(path):
    '''
    Reads and returns the contents of a file
    '''
    try:
        # Read as bytes and decode so the result is a list of unicode lines.
        with salt.utils.files.fopen(path, 'rb') as rfh:
            lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()
            try:
                # list.remove drops only the FIRST empty line (typically the
                # one produced by a trailing newline); ValueError means none.
                lines.remove('')
            except ValueError:
                pass
            return lines
    except Exception:
        # Any read/decode failure yields an empty result rather than raising.
        return []  # Return empty list for type consistency


def _write_file_iface(iface, data, folder, pattern):
    '''
    Writes a file to disk
    '''
    # ``pattern`` is a format string taking the interface name,
    # e.g. 'ifcfg-{0}' or '{0}.conf'.
    filename = os.path.join(folder, pattern.format(iface))
    if not os.path.exists(folder):
        msg = '{0} cannot be written. {1} does not exist'
        msg = msg.format(filename, folder)
        log.error(msg)
        raise AttributeError(msg)
    with salt.utils.files.fopen(filename, 'w') as fp_:
        fp_.write(salt.utils.stringutils.to_str(data))


def _write_file_network(data, filename):
    '''
    Writes a file to disk
    '''
    with salt.utils.files.fopen(filename, 'w') as fp_:
        fp_.write(salt.utils.stringutils.to_str(data))


def _read_temp(data):
    # Split rendered template text into lines, mirroring _read_file()'s
    # output shape; used by the ``test=True`` code paths.
    lines = data.splitlines()
    try:
        # Discard newlines if they exist
        lines.remove('')
    except ValueError:
        pass
    return lines


def build_bond(iface, **settings):
    '''
    Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    CLI Example:

    ..
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
# NOTE(review): the docstring text and statements below complete
# get_interface(), whose ``def`` line is on the previous collapsed line.
    code-block:: bash

        salt '*' ip.get_interface eth0
    '''
    path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface))
    return _read_file(path)


def up(iface, iface_type):  # pylint: disable=C0103
    '''
    Start up a network interface

    CLI Example:

    .. code-block:: bash

        salt '*' ip.up eth0
    '''
    # Slave devices are controlled by the master.
    if iface_type not in ['slave']:
        return __salt__['cmd.run']('ifup {0}'.format(iface))
    return None


def get_routes(iface):
    '''
    Return the contents of the interface routes script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_routes eth0
    '''
    # Combine the IPv4 (route-*) and IPv6 (route6-*) route files.
    path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface))
    path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface))
    routes = _read_file(path)
    routes.extend(_read_file(path6))
    return routes


def get_network_settings():
    '''
    Return the contents of the global network script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.get_network_settings
    '''
    return _read_file(_RH_NETWORK_FILE)


def apply_network_settings(**settings):
    '''
    Apply global network configuration.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.apply_network_settings
    '''
    if 'require_reboot' not in settings:
        settings['require_reboot'] = False

    if 'apply_hostname' not in settings:
        settings['apply_hostname'] = False

    hostname_res = True
    if settings['apply_hostname'] in _CONFIG_TRUE:
        if 'hostname' in settings:
            hostname_res = __salt__['network.mod_hostname'](settings['hostname'])
        else:
            log.warning(
                'The network state sls is trying to apply hostname '
                'changes but no hostname is defined.'
            )
            hostname_res = False

    res = True
    if settings['require_reboot'] in _CONFIG_TRUE:
        # Defer to a reboot instead of restarting the network service.
        log.warning(
            'The network state sls is requiring a reboot of the system to '
            'properly apply network configuration.'
        )
        res = True
    else:
        res = __salt__['service.restart']('network')

    # Both the hostname change (if requested) and the service action must
    # succeed for an overall True.
    return hostname_res and res


def build_network_settings(**settings):
    '''
    Build the global network script.

    CLI Example:

    ..
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
get_bond
python
def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path)
Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1149-L1160
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. 
if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
get_interface
python
def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path)
Return the contents of an interface script CLI Example: .. code-block:: bash salt '*' ip.get_interface eth0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1163-L1174
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. 
if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
get_routes
python
def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes
Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1193-L1207
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. 
code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
apply_network_settings
python
def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res
Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1223-L1260
null
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
saltstack/salt
salt/modules/rh_ip.py
build_network_settings
python
def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1263-L1291
[ "def _read_file(path):\n '''\n Reads and returns the contents of a file\n '''\n try:\n with salt.utils.files.fopen(path, 'rb') as rfh:\n lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines()\n try:\n lines.remove('')\n except ValueError:\n pass\n return lines\n except Exception:\n return [] # Return empty list for type consistency\n", "def _parse_network_settings(opts, current):\n '''\n Filters given options and outputs valid settings for\n the global network settings file.\n '''\n # Normalize keys\n opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))\n current = dict((k.lower(), v) for (k, v) in six.iteritems(current))\n\n # Check for supported parameters\n retain_settings = opts.get('retain_settings', False)\n result = current if retain_settings else {}\n\n # Default quote type is an empty string, which will not quote values\n quote_type = ''\n\n valid = _CONFIG_TRUE + _CONFIG_FALSE\n if 'enabled' not in opts:\n try:\n opts['networking'] = current['networking']\n # If networking option is quoted, use its quote type\n quote_type = salt.utils.stringutils.is_quoted(opts['networking'])\n _log_default_network('networking', current['networking'])\n except ValueError:\n _raise_error_network('networking', valid)\n else:\n opts['networking'] = opts['enabled']\n\n true_val = '{0}yes{0}'.format(quote_type)\n false_val = '{0}no{0}'.format(quote_type)\n\n networking = salt.utils.stringutils.dequote(opts['networking'])\n if networking in valid:\n if networking in _CONFIG_TRUE:\n result['networking'] = true_val\n elif networking in _CONFIG_FALSE:\n result['networking'] = false_val\n else:\n _raise_error_network('networking', valid)\n\n if 'hostname' not in opts:\n try:\n opts['hostname'] = current['hostname']\n _log_default_network('hostname', current['hostname'])\n except Exception:\n _raise_error_network('hostname', ['server1.example.com'])\n\n if opts['hostname']:\n result['hostname'] = '{1}{0}{1}'.format(\n 
salt.utils.stringutils.dequote(opts['hostname']), quote_type)\n else:\n _raise_error_network('hostname', ['server1.example.com'])\n\n if 'nozeroconf' in opts:\n nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf'])\n if nozeroconf in valid:\n if nozeroconf in _CONFIG_TRUE:\n result['nozeroconf'] = true_val\n elif nozeroconf in _CONFIG_FALSE:\n result['nozeroconf'] = false_val\n else:\n _raise_error_network('nozeroconf', valid)\n\n for opt in opts:\n if opt not in ['networking', 'hostname', 'nozeroconf']:\n result[opt] = '{1}{0}{1}'.format(\n salt.utils.stringutils.dequote(opts[opt]), quote_type)\n return result\n", "def _write_file_network(data, filename):\n '''\n Writes a file to disk\n '''\n with salt.utils.files.fopen(filename, 'w') as fp_:\n fp_.write(salt.utils.stringutils.to_str(data))\n", "def _read_temp(data):\n lines = data.splitlines()\n try: # Discard newlines if they exist\n lines.remove('')\n except ValueError:\n pass\n return lines\n", "def _parse_rh_config(path):\n rh_config = _read_file(path)\n cv_rh_config = {}\n if rh_config:\n for line in rh_config:\n line = line.strip()\n if not line or line.startswith('!') or line.startswith('#'):\n continue\n pair = [p.rstrip() for p in line.split('=', 1)]\n if len(pair) != 2:\n continue\n name, value = pair\n cv_rh_config[name.upper()] = value\n\n return cv_rh_config\n" ]
# -*- coding: utf-8 -*- ''' The networking module for RHEL/Fedora based distros ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os.path import os # Import third party libs import jinja2 import jinja2.exceptions # Import salt libs import salt.utils.files import salt.utils.stringutils import salt.utils.templates import salt.utils.validate.net from salt.exceptions import CommandExecutionError from salt.ext import six # Set up logging log = logging.getLogger(__name__) # Set up template environment JINJA = jinja2.Environment( loader=jinja2.FileSystemLoader( os.path.join(salt.utils.templates.TEMPLATE_DIRNAME, 'rh_ip') ) ) # Define the module's virtual name __virtualname__ = 'ip' def __virtual__(): ''' Confine this module to RHEL/Fedora based distros ''' if __grains__['os_family'] == 'RedHat': return __virtualname__ return (False, 'The rh_ip execution module cannot be loaded: this module is only available on RHEL/Fedora based distributions.') # Setup networking attributes _ETHTOOL_CONFIG_OPTS = [ 'autoneg', 'speed', 'duplex', 'rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro', 'advertise' ] _RH_CONFIG_OPTS = [ 'domain', 'peerdns', 'peerntp', 'defroute', 'mtu', 'static-routes', 'gateway', 'zone' ] _RH_CONFIG_BONDING_OPTS = [ 'mode', 'miimon', 'arp_interval', 'arp_ip_target', 'downdelay', 'updelay', 'use_carrier', 'lacp_rate', 'hashing-algorithm', 'max_bonds', 'tx_queues', 'num_grat_arp', 'num_unsol_na', 'primary', 'primary_reselect', 'ad_select', 'xmit_hash_policy', 'arp_validate', 'fail_over_mac', 'all_slaves_active', 'resend_igmp' ] _RH_NETWORK_SCRIPT_DIR = '/etc/sysconfig/network-scripts' _RH_NETWORK_FILE = '/etc/sysconfig/network' _RH_NETWORK_CONF_FILES = '/etc/modprobe.d' _CONFIG_TRUE = ['yes', 'on', 'true', '1', True] _CONFIG_FALSE = ['no', 'off', 'false', '0', False] _IFACE_TYPES = [ 'eth', 'bond', 'alias', 'clone', 'ipsec', 'dialup', 'bridge', 'slave', 'vlan', 'ipip', 'ib', ] def 
_error_msg_iface(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, '|'.join(str(e) for e in expected)) def _error_msg_routes(iface, option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid option -- Route interface: {0}, Option: {1}, Expected: [{2}]' return msg.format(iface, option, expected) def _log_default_iface(iface, opt, value): log.info('Using default option -- Interface: %s Option: %s Value: %s', iface, opt, value) def _error_msg_network(option, expected): ''' Build an appropriate error message from a given option and a list of expected values. ''' msg = 'Invalid network setting -- Setting: {0}, Expected: [{1}]' return msg.format(option, '|'.join(str(e) for e in expected)) def _log_default_network(opt, value): log.info('Using existing setting -- Setting: %s Value: %s', opt, value) def _parse_rh_config(path): rh_config = _read_file(path) cv_rh_config = {} if rh_config: for line in rh_config: line = line.strip() if not line or line.startswith('!') or line.startswith('#'): continue pair = [p.rstrip() for p in line.split('=', 1)] if len(pair) != 2: continue name, value = pair cv_rh_config[name.upper()] = value return cv_rh_config def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) if 'advertise' in opts: valid = [ '0x001', '0x002', '0x004', '0x008', '0x010', '0x020', '0x20000', '0x8000', '0x1000', '0x40000', '0x80000', '0x200000', '0x400000', '0x800000', '0x1000000', '0x2000000', '0x4000000' ] if six.text_type(opts['advertise']) in valid: config.update({'advertise': opts['advertise']}) else: _raise_error_iface(iface, 'advertise', valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config def _parse_settings_bond(opts, iface): ''' Filters given options and outputs valid settings for requested operation. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond_def = { # 803.ad aggregation selection logic # 0 for stable (default) # 1 for bandwidth # 2 for count 'ad_select': '0', # Max number of transmit queues (default = 16) 'tx_queues': '16', # Link monitoring in milliseconds. 
Most NICs support this 'miimon': '100', # ARP interval in milliseconds 'arp_interval': '250', # Delay before considering link down in milliseconds (miimon * 2) 'downdelay': '200', # lacp_rate 0: Slow - every 30 seconds # lacp_rate 1: Fast - every 1 second 'lacp_rate': '0', # Max bonds for this driver 'max_bonds': '1', # Specifies the time, in milliseconds, to wait before # enabling a slave after a link recovery has been # detected. Only used with miimon. 'updelay': '0', # Used with miimon. # On: driver sends mii # Off: ethtool sends mii 'use_carrier': '0', # Default. Don't change unless you know what you are doing. 'xmit_hash_policy': 'layer2', } if opts['mode'] in ['balance-rr', '0']: log.info( 'Device: %s Bonding Mode: load balancing (round-robin)', iface ) return _parse_settings_bond_0(opts, iface, bond_def) elif opts['mode'] in ['active-backup', '1']: log.info( 'Device: %s Bonding Mode: fault-tolerance (active-backup)', iface ) return _parse_settings_bond_1(opts, iface, bond_def) elif opts['mode'] in ['balance-xor', '2']: log.info( 'Device: %s Bonding Mode: load balancing (xor)', iface ) return _parse_settings_bond_2(opts, iface, bond_def) elif opts['mode'] in ['broadcast', '3']: log.info( 'Device: %s Bonding Mode: fault-tolerance (broadcast)', iface ) return _parse_settings_bond_3(opts, iface, bond_def) elif opts['mode'] in ['802.3ad', '4']: log.info( 'Device: %s Bonding Mode: IEEE 802.3ad Dynamic link ' 'aggregation', iface ) return _parse_settings_bond_4(opts, iface, bond_def) elif opts['mode'] in ['balance-tlb', '5']: log.info( 'Device: %s Bonding Mode: transmit load balancing', iface ) return _parse_settings_bond_5(opts, iface, bond_def) elif opts['mode'] in ['balance-alb', '6']: log.info( 'Device: %s Bonding Mode: adaptive load balancing', iface ) return _parse_settings_bond_6(opts, iface, bond_def) else: valid = [ '0', '1', '2', '3', '4', '5', '6', 'balance-rr', 'active-backup', 'balance-xor', 'broadcast', '802.3ad', 'balance-tlb', 'balance-alb' ] 
_raise_error_iface(iface, 'mode', valid) def _parse_settings_bond_0(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond0. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' # balance-rr shares miimon settings with balance-xor bond = _parse_settings_bond_1(opts, iface, bond_def) bond.update({'mode': '0'}) # ARP targets in n.n.n.n form valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) elif 'miimon' not in opts: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) return bond def _parse_settings_bond_1(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '1'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_2(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond2. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '2'} valid = ['list of ips (up to 16)'] if 'arp_ip_target' in opts: if isinstance(opts['arp_ip_target'], list): if 1 <= len(opts['arp_ip_target']) <= 16: bond.update({'arp_ip_target': ''}) for ip in opts['arp_ip_target']: # pylint: disable=C0103 if bond['arp_ip_target']: bond['arp_ip_target'] = bond['arp_ip_target'] + ',' + ip else: bond['arp_ip_target'] = ip else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) else: _raise_error_iface(iface, 'arp_ip_target', valid) if 'arp_interval' in opts: try: int(opts['arp_interval']) bond.update({'arp_interval': opts['arp_interval']}) except Exception: _raise_error_iface(iface, 'arp_interval', ['integer']) else: _log_default_iface(iface, 'arp_interval', bond_def['arp_interval']) bond.update({'arp_interval': bond_def['arp_interval']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_3(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond3. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '3'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) return bond def _parse_settings_bond_4(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond4. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '4'} for binding in ['miimon', 'downdelay', 'updelay', 'lacp_rate', 'ad_select']: if binding in opts: if binding == 'lacp_rate': if opts[binding] == 'fast': opts.update({binding: '1'}) if opts[binding] == 'slow': opts.update({binding: '0'}) valid = ['fast', '1', 'slow', '0'] else: valid = ['integer'] try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, valid) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'hashing-algorithm' in opts: valid = ['layer2', 'layer2+3', 'layer3+4'] if 
opts['hashing-algorithm'] in valid: bond.update({'xmit_hash_policy': opts['hashing-algorithm']}) else: _raise_error_iface(iface, 'hashing-algorithm', valid) return bond def _parse_settings_bond_5(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond5. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' bond = {'mode': '5'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_bond_6(opts, iface, bond_def): ''' Filters given options and outputs valid settings for bond6. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. 
''' bond = {'mode': '6'} for binding in ['miimon', 'downdelay', 'updelay']: if binding in opts: try: int(opts[binding]) bond.update({binding: opts[binding]}) except Exception: _raise_error_iface(iface, binding, ['integer']) else: _log_default_iface(iface, binding, bond_def[binding]) bond.update({binding: bond_def[binding]}) if 'use_carrier' in opts: if opts['use_carrier'] in _CONFIG_TRUE: bond.update({'use_carrier': '1'}) elif opts['use_carrier'] in _CONFIG_FALSE: bond.update({'use_carrier': '0'}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'use_carrier', valid) else: _log_default_iface(iface, 'use_carrier', bond_def['use_carrier']) bond.update({'use_carrier': bond_def['use_carrier']}) if 'primary' in opts: bond.update({'primary': opts['primary']}) return bond def _parse_settings_vlan(opts, iface): ''' Filters given options and outputs valid settings for a vlan ''' vlan = {} if 'reorder_hdr' in opts: if opts['reorder_hdr'] in _CONFIG_TRUE + _CONFIG_FALSE: vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = _CONFIG_TRUE + _CONFIG_FALSE _raise_error_iface(iface, 'reorder_hdr', valid) if 'vlan_id' in opts: if opts['vlan_id'] > 0: vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if 'phys_dev' in opts: if opts['phys_dev']: vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan def _parse_settings_eth(opts, iface_type, enabled, iface): ''' Filters given options and outputs valid settings for a network interface. 
''' result = {'name': iface} if 'proto' in opts: valid = ['none', 'bootp', 'dhcp'] if opts['proto'] in valid: result['proto'] = opts['proto'] else: _raise_error_iface(iface, opts['proto'], valid) if 'dns' in opts: result['dns'] = opts['dns'] result['peerdns'] = 'yes' if 'mtu' in opts: try: result['mtu'] = int(opts['mtu']) except ValueError: _raise_error_iface(iface, 'mtu', ['integer']) if iface_type not in ['bridge']: ethtool = _parse_ethtool_opts(opts, iface) if ethtool: result['ethtool'] = ethtool if iface_type == 'slave': result['proto'] = 'none' if iface_type == 'bond': bonding = _parse_settings_bond(opts, iface) if bonding: result['bonding'] = bonding result['devtype'] = "Bond" if iface_type == 'vlan': vlan = _parse_settings_vlan(opts, iface) if vlan: result['devtype'] = "Vlan" for opt in vlan: result[opt] = opts[opt] if iface_type not in ['bond', 'vlan', 'bridge', 'ipip']: auto_addr = False if 'addr' in opts: if salt.utils.validate.net.mac(opts['addr']): result['addr'] = opts['addr'] elif opts['addr'] == 'auto': auto_addr = True elif opts['addr'] != 'none': _raise_error_iface(iface, opts['addr'], ['AA:BB:CC:DD:EE:FF', 'auto', 'none']) else: auto_addr = True if auto_addr: # If interface type is slave for bond, not setting hwaddr if iface_type != 'slave': ifaces = __salt__['network.interfaces']() if iface in ifaces and 'hwaddr' in ifaces[iface]: result['addr'] = ifaces[iface]['hwaddr'] if iface_type == 'eth': result['devtype'] = 'Ethernet' if iface_type == 'bridge': result['devtype'] = 'Bridge' bypassfirewall = True valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['bypassfirewall']: if opt in opts: if opts[opt] in _CONFIG_TRUE: bypassfirewall = True elif opts[opt] in _CONFIG_FALSE: bypassfirewall = False else: _raise_error_iface(iface, opts[opt], valid) bridgectls = [ 'net.bridge.bridge-nf-call-ip6tables', 'net.bridge.bridge-nf-call-iptables', 'net.bridge.bridge-nf-call-arptables', ] if bypassfirewall: sysctl_value = 0 else: sysctl_value = 1 for sysctl in 
bridgectls: try: __salt__['sysctl.persist'](sysctl, sysctl_value) except CommandExecutionError: log.warning('Failed to set sysctl: %s', sysctl) else: if 'bridge' in opts: result['bridge'] = opts['bridge'] if iface_type == 'ipip': result['devtype'] = 'IPIP' for opt in ['my_inner_ipaddr', 'my_outer_ipaddr']: if opt not in opts: _raise_error_iface(iface, opts[opt], ['1.2.3.4']) else: result[opt] = opts[opt] if iface_type == 'ib': result['devtype'] = 'InfiniBand' if 'prefix' in opts: if 'netmask' in opts: msg = 'Cannot use prefix and netmask together' log.error(msg) raise AttributeError(msg) result['prefix'] = opts['prefix'] elif 'netmask' in opts: result['netmask'] = opts['netmask'] for opt in ['ipaddr', 'master', 'srcaddr', 'delay', 'domain', 'gateway', 'uuid', 'nickname', 'zone']: if opt in opts: result[opt] = opts[opt] for opt in ['ipv6addr', 'ipv6gateway']: if opt in opts: result[opt] = opts[opt] if 'ipaddrs' in opts: result['ipaddrs'] = [] for opt in opts['ipaddrs']: if salt.utils.validate.net.ipv4_addr(opt): ip, prefix = [i.strip() for i in opt.split('/')] result['ipaddrs'].append({'ipaddr': ip, 'prefix': prefix}) else: msg = 'ipv4 CIDR is invalid' log.error(msg) raise AttributeError(msg) if 'ipv6addrs' in opts: for opt in opts['ipv6addrs']: if not salt.utils.validate.net.ipv6_addr(opt): msg = 'ipv6 CIDR is invalid' log.error(msg) raise AttributeError(msg) result['ipv6addrs'] = opts['ipv6addrs'] if 'enable_ipv6' in opts: result['enable_ipv6'] = opts['enable_ipv6'] valid = _CONFIG_TRUE + _CONFIG_FALSE for opt in ['onparent', 'peerdns', 'peerroutes', 'slave', 'vlan', 'defroute', 'stp', 'ipv6_peerdns', 'ipv6_defroute', 'ipv6_peerroutes', 'ipv6_autoconf', 'ipv4_failure_fatal', 'dhcpv6c']: if opt in opts: if opts[opt] in _CONFIG_TRUE: result[opt] = 'yes' elif opts[opt] in _CONFIG_FALSE: result[opt] = 'no' else: _raise_error_iface(iface, opts[opt], valid) if 'onboot' in opts: log.warning( 'The \'onboot\' option is controlled by the \'enabled\' option. 
' 'Interface: %s Enabled: %s', iface, enabled ) if enabled: result['onboot'] = 'yes' else: result['onboot'] = 'no' # If the interface is defined then we want to always take # control away from non-root users; unless the administrator # wants to allow non-root users to control the device. if 'userctl' in opts: if opts['userctl'] in _CONFIG_TRUE: result['userctl'] = 'yes' elif opts['userctl'] in _CONFIG_FALSE: result['userctl'] = 'no' else: _raise_error_iface(iface, opts['userctl'], valid) else: result['userctl'] = 'no' # This vlan is in opts, and should be only used in range interface # will affect jinja template for interface generating if 'vlan' in opts: if opts['vlan'] in _CONFIG_TRUE: result['vlan'] = 'yes' elif opts['vlan'] in _CONFIG_FALSE: result['vlan'] = 'no' else: _raise_error_iface(iface, opts['vlan'], valid) if 'arpcheck' in opts: if opts['arpcheck'] in _CONFIG_FALSE: result['arpcheck'] = 'no' if 'ipaddr_start' in opts: result['ipaddr_start'] = opts['ipaddr_start'] if 'ipaddr_end' in opts: result['ipaddr_end'] = opts['ipaddr_end'] if 'clonenum_start' in opts: result['clonenum_start'] = opts['clonenum_start'] # If NetworkManager is available, we can control whether we use # it or not if 'nm_controlled' in opts: if opts['nm_controlled'] in _CONFIG_TRUE: result['nm_controlled'] = 'yes' elif opts['nm_controlled'] in _CONFIG_FALSE: result['nm_controlled'] = 'no' else: _raise_error_iface(iface, opts['nm_controlled'], valid) else: result['nm_controlled'] = 'no' return result def _parse_routes(iface, opts): ''' Filters given options and outputs valid settings for the route settings file. ''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) result = {} if 'routes' not in opts: _raise_error_routes(iface, 'routes', 'List of routes') for opt in opts: result[opt] = opts[opt] return result def _parse_network_settings(opts, current): ''' Filters given options and outputs valid settings for the global network settings file. 
''' # Normalize keys opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts)) current = dict((k.lower(), v) for (k, v) in six.iteritems(current)) # Check for supported parameters retain_settings = opts.get('retain_settings', False) result = current if retain_settings else {} # Default quote type is an empty string, which will not quote values quote_type = '' valid = _CONFIG_TRUE + _CONFIG_FALSE if 'enabled' not in opts: try: opts['networking'] = current['networking'] # If networking option is quoted, use its quote type quote_type = salt.utils.stringutils.is_quoted(opts['networking']) _log_default_network('networking', current['networking']) except ValueError: _raise_error_network('networking', valid) else: opts['networking'] = opts['enabled'] true_val = '{0}yes{0}'.format(quote_type) false_val = '{0}no{0}'.format(quote_type) networking = salt.utils.stringutils.dequote(opts['networking']) if networking in valid: if networking in _CONFIG_TRUE: result['networking'] = true_val elif networking in _CONFIG_FALSE: result['networking'] = false_val else: _raise_error_network('networking', valid) if 'hostname' not in opts: try: opts['hostname'] = current['hostname'] _log_default_network('hostname', current['hostname']) except Exception: _raise_error_network('hostname', ['server1.example.com']) if opts['hostname']: result['hostname'] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts['hostname']), quote_type) else: _raise_error_network('hostname', ['server1.example.com']) if 'nozeroconf' in opts: nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf']) if nozeroconf in valid: if nozeroconf in _CONFIG_TRUE: result['nozeroconf'] = true_val elif nozeroconf in _CONFIG_FALSE: result['nozeroconf'] = false_val else: _raise_error_network('nozeroconf', valid) for opt in opts: if opt not in ['networking', 'hostname', 'nozeroconf']: result[opt] = '{1}{0}{1}'.format( salt.utils.stringutils.dequote(opts[opt]), quote_type) return result def _raise_error_iface(iface, 
option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_iface(iface, option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_network(option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_network(option, expected) log.error(msg) raise AttributeError(msg) def _raise_error_routes(iface, option, expected): ''' Log and raise an error with a logical formatted message. ''' msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg) def _read_file(path): ''' Reads and returns the contents of a file ''' try: with salt.utils.files.fopen(path, 'rb') as rfh: lines = salt.utils.stringutils.to_unicode(rfh.read()).splitlines() try: lines.remove('') except ValueError: pass return lines except Exception: return [] # Return empty list for type consistency def _write_file_iface(iface, data, folder, pattern): ''' Writes a file to disk ''' filename = os.path.join(folder, pattern.format(iface)) if not os.path.exists(folder): msg = '{0} cannot be written. {1} does not exist' msg = msg.format(filename, folder) log.error(msg) raise AttributeError(msg) with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _write_file_network(data, filename): ''' Writes a file to disk ''' with salt.utils.files.fopen(filename, 'w') as fp_: fp_.write(salt.utils.stringutils.to_str(data)) def _read_temp(data): lines = data.splitlines() try: # Discard newlines if they exist lines.remove('') except ValueError: pass return lines def build_bond(iface, **settings): ''' Create a bond script in /etc/modprobe.d with the passed settings and load the bonding kernel module. CLI Example: .. 
code-block:: bash salt '*' ip.build_bond bond0 mode=balance-alb ''' rh_major = __grains__['osrelease'][:1] opts = _parse_settings_bond(settings, iface) try: template = JINJA.get_template('conf.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template conf.jinja') return '' data = template.render({'name': iface, 'bonding': opts}) _write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) if rh_major == '5': __salt__['cmd.run']( 'sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['cmd.run']( 'sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False ) __salt__['file.append']('/etc/modprobe.conf', path) __salt__['kmod.load']('bonding') if settings['test']: return _read_temp(data) return _read_file(path) def build_interface(iface, iface_type, enabled, **settings): ''' Build an interface script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_interface eth0 eth <settings> ''' if __grains__['os'] == 'Fedora': if __grains__['osmajorrelease'] >= 18: rh_major = '7' else: rh_major = '6' else: rh_major = __grains__['osrelease'][:1] iface_type = iface_type.lower() if iface_type not in _IFACE_TYPES: _raise_error_iface(iface, iface_type, _IFACE_TYPES) if iface_type == 'slave': settings['slave'] = 'yes' if 'master' not in settings: msg = 'master is a required setting for slave interfaces' log.error(msg) raise AttributeError(msg) if iface_type == 'vlan': settings['vlan'] = 'yes' if iface_type == 'bridge': __salt__['pkg.install']('bridge-utils') if iface_type in ['eth', 'bond', 'bridge', 'slave', 'vlan', 'ipip', 'ib', 'alias']: opts = _parse_settings_eth(settings, iface_type, enabled, iface) try: template = JINJA.get_template('rh{0}_eth.jinja'.format(rh_major)) except jinja2.exceptions.TemplateNotFound: log.error( 'Could not load template rh%s_eth.jinja', rh_major ) return '' ifcfg = template.render(opts) if 'test' in settings and settings['test']: return _read_temp(ifcfg) _write_file_iface(iface, ifcfg, _RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def build_routes(iface, **settings): ''' Build a route script for a network interface. CLI Example: .. 
code-block:: bash salt '*' ip.build_routes eth0 <settings> ''' template = 'rh6_route_eth.jinja' try: if int(__grains__['osrelease'][0]) < 6: template = 'route_eth.jinja' except ValueError: pass log.debug('Template name: %s', template) opts = _parse_routes(iface, settings) log.debug('Opts: \n %s', opts) try: template = JINJA.get_template(template) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template %s', template) return '' opts6 = [] opts4 = [] for route in opts['routes']: ipaddr = route['ipaddr'] if salt.utils.validate.net.ipv6_addr(ipaddr): opts6.append(route) else: opts4.append(route) log.debug("IPv4 routes:\n%s", opts4) log.debug("IPv6 routes:\n%s", opts6) routecfg = template.render(routes=opts4, iface=iface) routecfg6 = template.render(routes=opts6, iface=iface) if settings['test']: routes = _read_temp(routecfg) routes.extend(_read_temp(routecfg6)) return routes _write_file_iface(iface, routecfg, _RH_NETWORK_SCRIPT_DIR, 'route-{0}') _write_file_iface(iface, routecfg6, _RH_NETWORK_SCRIPT_DIR, 'route6-{0}') path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def down(iface, iface_type): ''' Shutdown a network interface CLI Example: .. code-block:: bash salt '*' ip.down eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifdown {0}'.format(iface)) return None def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path) def get_interface(iface): ''' Return the contents of an interface script CLI Example: .. 
code-block:: bash salt '*' ip.get_interface eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'ifcfg-{0}'.format(iface)) return _read_file(path) def up(iface, iface_type): # pylint: disable=C0103 ''' Start up a network interface CLI Example: .. code-block:: bash salt '*' ip.up eth0 ''' # Slave devices are controlled by the master. if iface_type not in ['slave']: return __salt__['cmd.run']('ifup {0}'.format(iface)) return None def get_routes(iface): ''' Return the contents of the interface routes script. CLI Example: .. code-block:: bash salt '*' ip.get_routes eth0 ''' path = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route-{0}'.format(iface)) path6 = os.path.join(_RH_NETWORK_SCRIPT_DIR, 'route6-{0}'.format(iface)) routes = _read_file(path) routes.extend(_read_file(path6)) return routes def get_network_settings(): ''' Return the contents of the global network script. CLI Example: .. code-block:: bash salt '*' ip.get_network_settings ''' return _read_file(_RH_NETWORK_FILE) def apply_network_settings(**settings): ''' Apply global network configuration. CLI Example: .. code-block:: bash salt '*' ip.apply_network_settings ''' if 'require_reboot' not in settings: settings['require_reboot'] = False if 'apply_hostname' not in settings: settings['apply_hostname'] = False hostname_res = True if settings['apply_hostname'] in _CONFIG_TRUE: if 'hostname' in settings: hostname_res = __salt__['network.mod_hostname'](settings['hostname']) else: log.warning( 'The network state sls is trying to apply hostname ' 'changes but no hostname is defined.' ) hostname_res = False res = True if settings['require_reboot'] in _CONFIG_TRUE: log.warning( 'The network state sls is requiring a reboot of the system to ' 'properly apply network configuration.' ) res = True else: res = __salt__['service.restart']('network') return hostname_res and res
saltstack/salt
salt/modules/gnomedesktop.py
setIdleDelay
python
def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds)
Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L144-L158
[ "def _set(self, value):\n '''\n set the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n result = {}\n result['retcode'] = 1\n result['stdout'] = 'User {0} does not exist'.format(user)\n return result\n\n cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n return result\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat) def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue) def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get() def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get() def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
saltstack/salt
salt/modules/gnomedesktop.py
setClockFormat
python
def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat)
Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L178-L194
[ "def _set(self, value):\n '''\n set the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n result = {}\n result['retcode'] = 1\n result['stdout'] = 'User {0} does not exist'.format(user)\n return result\n\n cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n return result\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds) def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue) def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get() def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get() def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
saltstack/salt
salt/modules/gnomedesktop.py
setClockShowDate
python
def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue)
Set whether the date is visible in the clock CLI Example: .. code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L214-L230
[ "def _set(self, value):\n '''\n set the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n result = {}\n result['retcode'] = 1\n result['stdout'] = 'User {0} does not exist'.format(user)\n return result\n\n cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n return result\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds) def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat) def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get() def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get() def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
saltstack/salt
salt/modules/gnomedesktop.py
getIdleActivation
python
def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get()
Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L233-L247
[ "def _get(self):\n '''\n get the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n return False\n\n cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n\n if 'stdout' in result:\n if 'uint32' in result['stdout']:\n return re.sub('uint32 ', '', result['stdout'])\n else:\n return result['stdout']\n else:\n return False\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds) def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat) def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue) def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get() def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
saltstack/salt
salt/modules/gnomedesktop.py
get
python
def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get()
Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L269-L281
[ "def _get(self):\n '''\n get the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n return False\n\n cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n\n if 'stdout' in result:\n if 'uint32' in result['stdout']:\n return re.sub('uint32 ', '', result['stdout'])\n else:\n return result['stdout']\n else:\n return False\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds) def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat) def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue) def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get() def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
saltstack/salt
salt/modules/gnomedesktop.py
set_
python
def set_(schema=None, key=None, user=None, value=None, **kwargs): ''' Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._set(value)
Set key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.set user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled value=False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L284-L296
[ "def _set(self, value):\n '''\n set the value for user in gsettings\n\n '''\n user = self.USER\n try:\n uid = pwd.getpwnam(user).pw_uid\n except KeyError:\n log.info('User does not exist')\n result = {}\n result['retcode'] = 1\n result['stdout'] = 'User {0} does not exist'.format(user)\n return result\n\n cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value]\n environ = {}\n environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)\n result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)\n return result\n" ]
# -*- coding: utf-8 -*- ''' GNOME implementations ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import re import logging try: import pwd HAS_PWD = True except ImportError: HAS_PWD = False # Import Salt libs import salt.utils.path # Import 3rd-party libs try: from gi.repository import Gio, GLib # pylint: disable=W0611 HAS_GLIB = True except ImportError: HAS_GLIB = False log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'gnome' # Don't shadow built-in's. __func_alias__ = { 'set_': 'set' } def __virtual__(): ''' Only load if the Gio and Glib modules are available ''' if HAS_PWD and HAS_GLIB: return __virtualname__ return (False, 'The gnome_desktop execution module cannot be loaded: ' 'The Gio and GLib modules are not available') class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 
result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result def ping(**kwargs): ''' A test to ensure the GNOME module is loaded CLI Example: .. code-block:: bash salt '*' gnome.ping user=<username> ''' return True def getIdleDelay(**kwargs): ''' Return the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.getIdleDelay user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._get() def setIdleDelay(delaySeconds, **kwargs): ''' Set the current idle delay setting in seconds CLI Example: .. code-block:: bash salt '*' gnome.setIdleDelay <seconds> user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.session', key='idle-delay') return _gsession._set(delaySeconds) def getClockFormat(**kwargs): ''' Return the current clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.getClockFormat user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._get() def setClockFormat(clockFormat, **kwargs): ''' Set the clock format, either 12h or 24h format. CLI Example: .. code-block:: bash salt '*' gnome.setClockFormat <12h|24h> user=<username> ''' if clockFormat != '12h' and clockFormat != '24h': return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-format') return _gsession._set(clockFormat) def getClockShowDate(**kwargs): ''' Return the current setting, if the date is shown in the clock CLI Example: .. 
code-block:: bash salt '*' gnome.getClockShowDate user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._get() def setClockShowDate(kvalue, **kwargs): ''' Set whether the date is visible in the clock CLI Example: .. code-block:: bash salt '*' gnome.setClockShowDate <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.interface', key='clock-show-date') return _gsession._set(kvalue) def getIdleActivation(**kwargs): ''' Get whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.getIdleActivation user=<username> ''' _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._get() def setIdleActivation(kvalue, **kwargs): ''' Set whether the idle activation is enabled CLI Example: .. code-block:: bash salt '*' gnome.setIdleActivation <True|False> user=<username> ''' if kvalue is not True and kvalue is not False: return False _gsession = _GSettings(user=kwargs.get('user'), schema='org.gnome.desktop.screensaver', key='idle-activation-enabled') return _gsession._set(kvalue) def get(schema=None, key=None, user=None, **kwargs): ''' Get key in a particular GNOME schema CLI Example: .. code-block:: bash salt '*' gnome.get user=<username> schema=org.gnome.desktop.screensaver key=idle-activation-enabled ''' _gsession = _GSettings(user=user, schema=schema, key=key) return _gsession._get()
saltstack/salt
salt/modules/gnomedesktop.py
_GSettings._get
python
def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False
get the value for user in gsettings
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L66-L89
null
class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result
saltstack/salt
salt/modules/gnomedesktop.py
_GSettings._set
python
def _set(self, value): ''' set the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') result = {} result['retcode'] = 1 result['stdout'] = 'User {0} does not exist'.format(user) return result cmd = self.gsetting_command + ['set', self.SCHEMA, self.KEY, value] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) return result
set the value for user in gsettings
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gnomedesktop.py#L91-L110
null
class _GSettings(object): def __init__(self, user, schema, key): self.SCHEMA = schema self.KEY = key self.USER = user self.UID = None self.HOME = None @property def gsetting_command(self): ''' return the command to run the gsettings binary ''' if salt.utils.path.which_bin(['dbus-run-session']): cmd = ['dbus-run-session', '--', 'gsettings'] else: cmd = ['dbus-launch', '--exit-with-session', 'gsettings'] return cmd def _get(self): ''' get the value for user in gsettings ''' user = self.USER try: uid = pwd.getpwnam(user).pw_uid except KeyError: log.info('User does not exist') return False cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)] environ = {} environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid) result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False) if 'stdout' in result: if 'uint32' in result['stdout']: return re.sub('uint32 ', '', result['stdout']) else: return result['stdout'] else: return False
saltstack/salt
salt/modules/rpm_lowpkg.py
bin_pkg_info
python
def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret
.. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L65-L128
[ "def parse_pkginfo(line, osarch=None):\n '''\n A small helper to parse an rpm/repoquery command's output. Returns a\n pkginfo namedtuple.\n '''\n try:\n name, epoch, version, release, arch, repoid, install_time = line.split('_|-')\n # Handle unpack errors (should never happen with the queryformat we are\n # using, but can't hurt to be careful).\n except ValueError:\n return None\n\n name = resolve_name(name, arch, osarch)\n if release:\n version += '-{0}'.format(release)\n if epoch not in ('(none)', '0'):\n version = ':'.join((epoch, version))\n\n if install_time not in ('(none)', '0'):\n install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + \"Z\"\n install_date_time_t = int(install_time)\n else:\n install_date = None\n install_date_time_t = None\n\n return pkginfo(name, version, arch, repoid, install_date, install_date_time_t)\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. 
Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", 
"signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 
'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. 
code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' ) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
list_pkgs
python
def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs
List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.list_pkgs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L131-L160
[ "def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. 
code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. 
msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. 
if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). 
root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. 
:param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: 
%{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: 
log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
verify
python
def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. 
msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret
Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L163-L262
[ "def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. 
if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). 
root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. 
:param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: 
%{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: 
log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
modified
python
def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data
List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L265-L340
[ "def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. 
Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", 
"signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 
'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. 
code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' ) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
file_list
python
def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret}
List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L343-L373
null
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. 
:param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: 
%{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: 
log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
file_dict
python
def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret}
List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L376-L420
[ "def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. 
code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. 
rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) 
else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. 
kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
owner
python
def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret
Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L423-L458
null
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. 
:param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. 
rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) 
else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. 
kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
diff
python
def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res
Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L464-L490
null
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. 
rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) 
else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. 
kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' 
) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
info
python
def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. 
rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", "signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) 
else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. 
kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret
Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L493-L674
null
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. 
versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' ) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. 
you are checking if a package version 3.2 is satisfied by # 3.2-1. (ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False) def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
version_cmp
python
def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' ) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. 
Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. (ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False)
.. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L677-L791
[ "def version_cmp(pkg1, pkg2, ignore_epoch=False):\n '''\n Compares two version strings using salt.utils.versions.LooseVersion. This\n is a fallback for providers which don't have a version comparison utility\n built into them. Return -1 if version1 < version2, 0 if version1 ==\n version2, and 1 if version1 > version2. Return None if there was a problem\n making the comparison.\n '''\n normalize = lambda x: six.text_type(x).split(':', 1)[-1] \\\n if ignore_epoch else six.text_type(x)\n pkg1 = normalize(pkg1)\n pkg2 = normalize(pkg2)\n\n try:\n # pylint: disable=no-member\n if LooseVersion(pkg1) < LooseVersion(pkg2):\n return -1\n elif LooseVersion(pkg1) == LooseVersion(pkg2):\n return 0\n elif LooseVersion(pkg1) > LooseVersion(pkg2):\n return 1\n except Exception as exc:\n log.exception(exc)\n return None\n", "def version_to_evr(verstring):\n '''\n Split the package version string into epoch, version and release.\n Return this as tuple.\n\n The epoch is always not empty. The version and the release can be an empty\n string if such a component could not be found in the version string.\n\n \"2:1.0-1.2\" => ('2', '1.0', '1.2)\n \"1.0\" => ('0', '1.0', '')\n \"\" => ('0', '', '')\n '''\n if verstring in [None, '']:\n return '0', '', ''\n\n idx_e = verstring.find(':')\n if idx_e != -1:\n try:\n epoch = six.text_type(int(verstring[:idx_e]))\n except ValueError:\n # look, garbage in the epoch field, how fun, kill it\n epoch = '0' # this is our fallback, deal\n else:\n epoch = '0'\n idx_r = verstring.find('-')\n if idx_r != -1:\n version = verstring[idx_e + 1:idx_r]\n release = verstring[idx_r + 1:]\n else:\n version = verstring[idx_e + 1:]\n release = ''\n\n return epoch, version, release\n", "normalize = lambda x: six.text_type(x).split(':', 1)[-1] \\\n if ignore_epoch \\\n else six.text_type(x)\n", "def _ensure_epoch(ver):\n def _prepend(ver):\n return '0:{0}'.format(ver)\n\n try:\n if ':' not in ver:\n return _prepend(ver)\n except TypeError:\n return _prepend(ver)\n 
return ver\n" ]
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. 
Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", 
"signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 
'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
saltstack/salt
salt/modules/rpm_lowpkg.py
checksum
python
def checksum(*paths, **kwargs): ''' Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm ''' ret = dict() if not paths: raise CommandExecutionError("No package files has been specified.") cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-K', '--quiet']) for package_file in paths: cmd_ = cmd + [package_file] ret[package_file] = (bool(__salt__['file.file_exists'](package_file)) and not __salt__['cmd.retcode'](cmd_, ignore_retcode=True, output_loglevel='trace', python_shell=False)) return ret
Return if the signature of a RPM file is valid. root use root as top level directory (default: "/") CLI Example: .. code-block:: bash salt '*' lowpkg.checksum /path/to/package1.rpm salt '*' lowpkg.checksum /path/to/package1.rpm /path/to/package2.rpm
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpm_lowpkg.py#L794-L825
null
# -*- coding: utf-8 -*- ''' Support for rpm ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os import re import datetime from salt.utils.versions import LooseVersion # Import Salt libs import salt.utils.decorators.path import salt.utils.itertools import salt.utils.path import salt.utils.pkg.rpm import salt.utils.versions # pylint: disable=import-error,redefined-builtin from salt.ext.six.moves import zip from salt.ext import six try: import rpm HAS_RPM = True except ImportError: HAS_RPM = False try: import rpmUtils.miscutils HAS_RPMUTILS = True except ImportError: HAS_RPMUTILS = False # pylint: enable=import-error,redefined-builtin from salt.exceptions import CommandExecutionError, SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'lowpkg' def __virtual__(): ''' Confine this module to rpm based systems ''' if not salt.utils.path.which('rpm'): return (False, 'The rpm execution module failed to load: rpm binary is not in the path.') try: os_grain = __grains__['os'].lower() os_family = __grains__['os_family'].lower() except Exception: return (False, 'The rpm execution module failed to load: failed to detect os or os_family grains.') enabled = ('amazon', 'xcp', 'xenserver', 'VirtuozzoLinux') if os_family in ['redhat', 'suse'] or os_grain in enabled: return __virtualname__ return (False, 'The rpm execution module failed to load: only available on redhat/suse type systems ' 'or amazon, xcp or xenserver.') def bin_pkg_info(path, saltenv='base'): ''' .. versionadded:: 2015.8.0 Parses RPM metadata and returns a dictionary of information about the package (name, version, etc.). path Path to the file. Can either be an absolute path to a file on the minion, or a salt fileserver URL (e.g. ``salt://path/to/file.rpm``). If a salt fileserver URL is passed, the file will be cached to the minion so that it can be examined. 
saltenv : base Salt fileserver environment from which to retrieve the package. Ignored if ``path`` is a local file path on the minion. CLI Example: .. code-block:: bash salt '*' lowpkg.bin_pkg_info /root/salt-2015.5.1-2.el7.noarch.rpm salt '*' lowpkg.bin_pkg_info salt://salt-2015.5.1-2.el7.noarch.rpm ''' # If the path is a valid protocol, pull it down using cp.cache_file if __salt__['config.valid_fileproto'](path): newpath = __salt__['cp.cache_file'](path, saltenv) if not newpath: raise CommandExecutionError( 'Unable to retrieve {0} from saltenv \'{1}\'' .format(path, saltenv) ) path = newpath else: if not os.path.exists(path): raise CommandExecutionError( '{0} does not exist on minion'.format(path) ) elif not os.path.isabs(path): raise SaltInvocationError( '{0} does not exist on minion'.format(path) ) # REPOID is not a valid tag for the rpm command. Remove it and replace it # with 'none' queryformat = salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', 'none') output = __salt__['cmd.run_stdout']( ['rpm', '-qp', '--queryformat', queryformat, path], output_loglevel='trace', ignore_retcode=True, python_shell=False ) ret = {} pkginfo = salt.utils.pkg.rpm.parse_pkginfo( output, osarch=__grains__['osarch'] ) try: for field in pkginfo._fields: ret[field] = getattr(pkginfo, field) except AttributeError: # pkginfo is None return None return ret def list_pkgs(*packages, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} root use root as top level directory (default: "/") CLI Example: .. 
code-block:: bash salt '*' lowpkg.list_pkgs ''' pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: continue comps = line.split() pkgs[comps[0]] = comps[1] return pkgs def verify(*packages, **kwargs): ''' Runs an rpm -Va on a system, and returns the results in a dict root use root as top level directory (default: "/") Files with an attribute of config, doc, ghost, license or readme in the package header can be ignored using the ``ignore_types`` keyword argument CLI Example: .. code-block:: bash salt '*' lowpkg.verify salt '*' lowpkg.verify httpd salt '*' lowpkg.verify httpd postfix salt '*' lowpkg.verify httpd postfix ignore_types=['config','doc'] ''' ftypes = {'c': 'config', 'd': 'doc', 'g': 'ghost', 'l': 'license', 'r': 'readme'} ret = {} ignore_types = kwargs.get('ignore_types', []) if not isinstance(ignore_types, (list, six.string_types)): raise SaltInvocationError( 'ignore_types must be a list or a comma-separated string' ) if isinstance(ignore_types, six.string_types): try: ignore_types = [x.strip() for x in ignore_types.split(',')] except AttributeError: ignore_types = [x.strip() for x in six.text_type(ignore_types).split(',')] verify_options = kwargs.get('verify_options', []) if not isinstance(verify_options, (list, six.string_types)): raise SaltInvocationError( 'verify_options must be a list or a comma-separated string' ) if isinstance(verify_options, six.string_types): try: verify_options = [x.strip() for x in verify_options.split(',')] except AttributeError: verify_options = [x.strip() for x in six.text_type(verify_options).split(',')] cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['--' + x for x in 
verify_options]) if packages: cmd.append('-V') # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) else: cmd.append('-Va') out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if not out['stdout'].strip() and out['retcode'] != 0: # If there is no stdout and the retcode is 0, then verification # succeeded, but if the retcode is nonzero, then the command failed. msg = 'Failed to verify package(s)' if out['stderr']: msg += ': {0}'.format(out['stderr']) raise CommandExecutionError(msg) for line in salt.utils.itertools.split(out['stdout'], '\n'): fdict = {'mismatch': []} if 'missing' in line: line = ' ' + line fdict['missing'] = True del fdict['mismatch'] fname = line[13:] if line[11:12] in ftypes: fdict['type'] = ftypes[line[11:12]] if 'type' not in fdict or fdict['type'] not in ignore_types: if line[0:1] == 'S': fdict['mismatch'].append('size') if line[1:2] == 'M': fdict['mismatch'].append('mode') if line[2:3] == '5': fdict['mismatch'].append('md5sum') if line[3:4] == 'D': fdict['mismatch'].append('device major/minor number') if line[4:5] == 'L': fdict['mismatch'].append('readlink path') if line[5:6] == 'U': fdict['mismatch'].append('user') if line[6:7] == 'G': fdict['mismatch'].append('group') if line[7:8] == 'T': fdict['mismatch'].append('mtime') if line[8:9] == 'P': fdict['mismatch'].append('capabilities') ret[fname] = fdict return ret def modified(*packages, **flags): ''' List the modified files that belong to a package. Not specifying any packages will return a list of _all_ modified files on the system's RPM database. .. versionadded:: 2015.5.0 root use root as top level directory (default: "/") CLI examples: .. 
code-block:: bash salt '*' lowpkg.modified httpd salt '*' lowpkg.modified httpd postfix salt '*' lowpkg.modified ''' cmd = ['rpm'] if flags.get('root'): cmd.extend(['--root', flags.pop('root')]) cmd.append('-Va') cmd.extend(packages) ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) data = {} # If verification has an output, then it means it failed # and the return code will be 1. We are interested in any bigger # than 1 code. if ret['retcode'] > 1: del ret['stdout'] return ret elif not ret['retcode']: return data ptrn = re.compile(r"\s+") changes = cfg = f_name = None for f_info in salt.utils.itertools.split(ret['stdout'], '\n'): f_info = ptrn.split(f_info) if len(f_info) == 3: # Config file changes, cfg, f_name = f_info else: changes, f_name = f_info cfg = None keys = ['size', 'mode', 'checksum', 'device', 'symlink', 'owner', 'group', 'time', 'capabilities'] changes = list(changes) if len(changes) == 8: # Older RPMs do not support capabilities changes.append('.') stats = [] for k, v in zip(keys, changes): if v != '.': stats.append(k) if cfg is not None: stats.append('config') data[f_name] = stats if not flags: return data # Filtering filtered_data = {} for f_name, stats in data.items(): include = True for param, pval in flags.items(): if param.startswith("_"): continue if (not pval and param in stats) or \ (pval and param not in stats): include = False break if include: filtered_data[f_name] = stats return filtered_data def file_list(*packages, **kwargs): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. 
code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list ''' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.append('-ql' if packages else '-qla') if packages: # Can't concatenate a tuple, must do a list.extend() cmd.extend(packages) ret = __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False).splitlines() return {'errors': [], 'files': ret} def file_dict(*packages, **kwargs): ''' List the files that belong to a package, sorted by group. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_dict httpd salt '*' lowpkg.file_dict httpd postfix salt '*' lowpkg.file_dict ''' errors = [] ret = {} pkgs = {} cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-q' if packages else '-qa', '--queryformat', r'%{NAME} %{VERSION}\n']) if packages: cmd.extend(packages) out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if 'is not installed' in line: errors.append(line) continue comps = line.split() pkgs[comps[0]] = {'version': comps[1]} for pkg in pkgs: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-ql', pkg]) out = __salt__['cmd.run']( ['rpm', '-ql', pkg], output_loglevel='trace', python_shell=False) ret[pkg] = out.splitlines() return {'errors': errors, 'packages': ret} def owner(*paths, **kwargs): ''' Return the name of the package that owns the file. Multiple file paths can be passed. If a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. 
If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.owner /usr/bin/apachectl salt '*' lowpkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf ''' if not paths: return '' ret = {} for path in paths: cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) cmd.extend(['-qf', '--queryformat', '%{name}', path]) ret[path] = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) if 'not owned' in ret[path].lower(): ret[path] = '' if len(ret) == 1: return list(ret.values())[0] return ret @salt.utils.decorators.path.which('rpm2cpio') @salt.utils.decorators.path.which('cpio') @salt.utils.decorators.path.which('diff') def diff(package_path, path): ''' Return a formatted diff between current file and original in a package. NOTE: this function includes all files (configuration and not), but does not work on binary content. :param package: Full pack of the RPM file :param path: Full path to the installed file :return: Difference or empty string. For binary files only a notification. CLI example: .. code-block:: bash salt '*' lowpkg.diff /path/to/apache2.rpm /etc/apache2/httpd.conf ''' cmd = "rpm2cpio {0} " \ "| cpio -i --quiet --to-stdout .{1} " \ "| diff -u --label 'A {1}' --from-file=- --label 'B {1}' {1}" res = __salt__['cmd.shell'](cmd.format(package_path, path), output_loglevel='trace') if res and res.startswith('Binary file'): return 'File \'{0}\' is binary and its content has been ' \ 'modified.'.format(path) return res def info(*packages, **kwargs): ''' Return a detailed package(s) summary information. If no packages specified, all packages will be returned. :param packages: :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. 
Valid attributes are: version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t, build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description. :param all_versions: Return information for all installed versions of the packages :param root: use root as top level directory (default: "/") :return: CLI example: .. code-block:: bash salt '*' lowpkg.info apache2 bash salt '*' lowpkg.info apache2 bash attr=version salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size salt '*' lowpkg.info apache2 bash attr=version,build_date_iso,size all_versions=True ''' all_versions = kwargs.get('all_versions', False) # LONGSIZE is not a valid tag for all versions of rpm. If LONGSIZE isn't # available, then we can just use SIZE for older versions. See Issue #31366. rpm_tags = __salt__['cmd.run_stdout']( ['rpm', '--querytags'], python_shell=False).splitlines() if 'LONGSIZE' in rpm_tags: size_tag = '%{LONGSIZE}' else: size_tag = '%{SIZE}' cmd = ['rpm'] if kwargs.get('root'): cmd.extend(['--root', kwargs['root']]) if packages: cmd.append('-q') cmd.extend(packages) else: cmd.append('-qa') # Construct query format attr_map = { "name": "name: %{NAME}\\n", "relocations": "relocations: %|PREFIXES?{[%{PREFIXES} ]}:{(not relocatable)}|\\n", "version": "version: %{VERSION}\\n", "vendor": "vendor: %{VENDOR}\\n", "release": "release: %{RELEASE}\\n", "epoch": "%|EPOCH?{epoch: %{EPOCH}\\n}|", "build_date_time_t": "build_date_time_t: %{BUILDTIME}\\n", "build_date": "build_date: %{BUILDTIME}\\n", "install_date_time_t": "install_date_time_t: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "install_date": "install_date: %|INSTALLTIME?{%{INSTALLTIME}}:{(not installed)}|\\n", "build_host": "build_host: %{BUILDHOST}\\n", "group": "group: %{GROUP}\\n", "source_rpm": "source_rpm: %{SOURCERPM}\\n", "size": "size: " + size_tag + "\\n", "arch": "arch: %{ARCH}\\n", "license": "%|LICENSE?{license: %{LICENSE}\\n}|", 
"signature": "signature: %|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:" "{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|\\n", "packager": "%|PACKAGER?{packager: %{PACKAGER}\\n}|", "url": "%|URL?{url: %{URL}\\n}|", "summary": "summary: %{SUMMARY}\\n", "description": "description:\\n%{DESCRIPTION}\\n", "edition": "edition: %|EPOCH?{%{EPOCH}:}|%{VERSION}-%{RELEASE}\\n", } attr = kwargs.get('attr', None) and kwargs['attr'].split(",") or None query = list() if attr: for attr_k in attr: if attr_k in attr_map and attr_k != 'description': query.append(attr_map[attr_k]) if not query: raise CommandExecutionError('No valid attributes found.') if 'name' not in attr: attr.append('name') query.append(attr_map['name']) if 'edition' not in attr: attr.append('edition') query.append(attr_map['edition']) else: for attr_k, attr_v in six.iteritems(attr_map): if attr_k != 'description': query.append(attr_v) if attr and 'description' in attr or not attr: query.append(attr_map['description']) query.append("-----\\n") cmd = ' '.join(cmd) call = __salt__['cmd.run_all'](cmd + (" --queryformat '{0}'".format(''.join(query))), output_loglevel='trace', env={'TZ': 'UTC'}, clean_env=True) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += (call['stderr'] or call['stdout']) raise CommandExecutionError(comment) elif 'error' in call['stderr']: raise CommandExecutionError(call['stderr']) else: out = call['stdout'] _ret = list() for pkg_info in re.split(r"----*", out): pkg_info = pkg_info.strip() if not pkg_info: continue pkg_info = pkg_info.split(os.linesep) if pkg_info[-1].lower().startswith('distribution'): pkg_info = pkg_info[:-1] pkg_data = dict() pkg_name = None descr_marker = False descr = list() for line in pkg_info: if descr_marker: descr.append(line) continue line = [item.strip() for item in line.split(':', 1)] if len(line) != 2: continue key, value = line if key == 'description': descr_marker = True continue if key == 
'name': pkg_name = value # Convert Unix ticks into ISO time format if key in ['build_date', 'install_date']: try: pkg_data[key] = datetime.datetime.utcfromtimestamp(int(value)).isoformat() + "Z" except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue # Convert Unix ticks into an Integer if key in ['build_date_time_t', 'install_date_time_t']: try: pkg_data[key] = int(value) except ValueError: log.warning('Could not convert "%s" into Unix time', value) continue if key not in ['description', 'name'] and value: pkg_data[key] = value if attr and 'description' in attr or not attr: pkg_data['description'] = os.linesep.join(descr) if pkg_name: pkg_data['name'] = pkg_name _ret.append(pkg_data) # Force-sort package data by version, # pick only latest versions # (in case multiple packages installed, e.g. kernel) ret = dict() for pkg_data in reversed(sorted(_ret, key=lambda x: LooseVersion(x['edition']))): pkg_name = pkg_data.pop('name') # Filter out GPG public keys packages if pkg_name.startswith('gpg-pubkey'): continue if pkg_name not in ret: if all_versions: ret[pkg_name] = [pkg_data.copy()] else: ret[pkg_name] = pkg_data.copy() del ret[pkg_name]['edition'] elif all_versions: ret[pkg_name].append(pkg_data.copy()) return ret def version_cmp(ver1, ver2, ignore_epoch=False): ''' .. versionadded:: 2015.8.9 Do a cmp-style comparison on two packages. Return -1 if ver1 < ver2, 0 if ver1 == ver2, and 1 if ver1 > ver2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. 
code-block:: bash salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch \ else six.text_type(x) ver1 = normalize(ver1) ver2 = normalize(ver2) try: cmp_func = None if HAS_RPM: try: cmp_func = rpm.labelCompare except AttributeError: # Catches corner case where someone has a module named "rpm" in # their pythonpath. log.debug( 'rpm module imported, but it does not have the ' 'labelCompare function. Not using rpm.labelCompare for ' 'version comparison.' ) if cmp_func is None and HAS_RPMUTILS: try: cmp_func = rpmUtils.miscutils.compareEVR except AttributeError: log.debug('rpmUtils.miscutils.compareEVR is not available') if cmp_func is None: if salt.utils.path.which('rpmdev-vercmp'): # rpmdev-vercmp always uses epochs, even when zero def _ensure_epoch(ver): def _prepend(ver): return '0:{0}'.format(ver) try: if ':' not in ver: return _prepend(ver) except TypeError: return _prepend(ver) return ver ver1 = _ensure_epoch(ver1) ver2 = _ensure_epoch(ver2) result = __salt__['cmd.run_all']( ['rpmdev-vercmp', ver1, ver2], python_shell=False, redirect_stderr=True, ignore_retcode=True) # rpmdev-vercmp returns 0 on equal, 11 on greater-than, and # 12 on less-than. if result['retcode'] == 0: return 0 elif result['retcode'] == 11: return 1 elif result['retcode'] == 12: return -1 else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'Failed to interpret results of rpmdev-vercmp output. ' 'This is probably a bug, and should be reported. ' 'Return code was %s. Output: %s', result['retcode'], result['stdout'] ) else: # We'll need to fall back to salt.utils.versions.version_cmp() log.warning( 'rpmdevtools is not installed, please install it for ' 'more accurate version comparisons' ) else: # If one EVR is missing a release but not the other and they # otherwise would be equal, ignore the release. This can happen if # e.g. you are checking if a package version 3.2 is satisfied by # 3.2-1. 
(ver1_e, ver1_v, ver1_r) = salt.utils.pkg.rpm.version_to_evr(ver1) (ver2_e, ver2_v, ver2_r) = salt.utils.pkg.rpm.version_to_evr(ver2) if not ver1_r or not ver2_r: ver1_r = ver2_r = '' cmp_result = cmp_func((ver1_e, ver1_v, ver1_r), (ver2_e, ver2_v, ver2_r)) if cmp_result not in (-1, 0, 1): raise CommandExecutionError( 'Comparison result \'{0}\' is invalid'.format(cmp_result) ) return cmp_result except Exception as exc: log.warning( 'Failed to compare version \'%s\' to \'%s\' using RPM: %s', ver1, ver2, exc ) # We would already have normalized the versions at the beginning of this # function if ignore_epoch=True, so avoid unnecessary work and just pass # False for this value. return salt.utils.versions.version_cmp(ver1, ver2, ignore_epoch=False)
saltstack/salt
salt/modules/haproxyconn.py
_get_conn
python
def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn
Get connection to haproxy socket.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L44-L52
null
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. 
name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. 
code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
list_servers
python
def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify)
List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L55-L73
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. 
name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. 
code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
enable_server
python
def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results
Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L158-L190
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n", "def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False):\n '''\n List servers in haproxy backend.\n\n backend\n haproxy backend\n\n socket\n haproxy stats socket, default ``/var/run/haproxy.sock``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.list_servers mysql\n '''\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.listServers(backend=backend)\n return ha_conn.sendCmd(ha_cmd, objectify=objectify)\n", "def show_backends(socket=DEFAULT_SOCKET_URL):\n '''\n Show HaProxy Backends\n\n socket\n haproxy stats socket, default ``/var/run/haproxy.sock``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' haproxy.show_backends\n '''\n ha_conn = _get_conn(socket)\n ha_cmd = haproxy.cmds.showBackends()\n return ha_conn.sendCmd(ha_cmd)\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. 
name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. 
code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
get_weight
python
def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd)
Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L228-L249
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. 
name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. 
versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
set_state
python
def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd)
Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L280-L320
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. 
name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
show_frontends
python
def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd)
Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L323-L338
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. 
name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. 
Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. 
code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
show_backends
python
def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd)
Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_backends
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L358-L373
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. 
name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. 
Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. 
code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
saltstack/salt
salt/modules/haproxyconn.py
get_sessions
python
def get_sessions(name, backend, socket=DEFAULT_SOCKET_URL): ''' .. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www ''' class getStats(haproxy.cmds.Cmd): p_args = ["backend", "server"] cmdTxt = "show stat\r\n" helpText = "Fetch all statistics" ha_conn = _get_conn(socket) ha_cmd = getStats(server=name, backend=backend) result = ha_conn.sendCmd(ha_cmd) for line in result.split('\n'): if line.startswith(backend): outCols = line.split(',') if outCols[1] == name: return outCols[4]
.. versionadded:: 2016.11.0 Get number of current sessions on server in backend (scur) name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_sessions web1.example.com www
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/haproxyconn.py#L402-L435
[ "def _get_conn(socket=DEFAULT_SOCKET_URL):\n '''\n Get connection to haproxy socket.\n '''\n assert os.path.exists(socket), '{0} does not exist.'.format(socket)\n issock = os.stat(socket).st_mode\n assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket)\n ha_conn = haproxy.conn.HaPConn(socket)\n return ha_conn\n" ]
# -*- coding: utf-8 -*- ''' Support for haproxy .. versionadded:: 2014.7.0 ''' # Import Python libs from __future__ import absolute_import, generators, print_function, unicode_literals import os import logging import stat import time try: import haproxy.cmds import haproxy.conn HAS_HAPROXY = True except ImportError: HAS_HAPROXY = False log = logging.getLogger(__name__) __virtualname__ = 'haproxy' # Default socket location DEFAULT_SOCKET_URL = '/var/run/haproxy.sock' # Numeric fields returned by stats FIELD_NUMERIC = ["weight", "bin", "bout"] # Field specifying the actual server name FIELD_NODE_NAME = "name" def __virtual__(): ''' Only load the module if haproxyctl is installed ''' if HAS_HAPROXY: return __virtualname__ return (False, 'The haproxyconn execution module cannot be loaded: haproxyctl module not available') def _get_conn(socket=DEFAULT_SOCKET_URL): ''' Get connection to haproxy socket. ''' assert os.path.exists(socket), '{0} does not exist.'.format(socket) issock = os.stat(socket).st_mode assert stat.S_ISSOCK(issock), '{0} is not a socket.'.format(socket) ha_conn = haproxy.conn.HaPConn(socket) return ha_conn def list_servers(backend, socket=DEFAULT_SOCKET_URL, objectify=False): ''' List servers in haproxy backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_servers mysql ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.listServers(backend=backend) return ha_conn.sendCmd(ha_cmd, objectify=objectify) def wait_state(backend, server, value='up', timeout=60*5, socket=DEFAULT_SOCKET_URL): ''' Wait for a specific server state backend haproxy backend server targeted server value state value timeout timeout before giving up state value, default 5 min socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.wait_state mysql server01 up 60 ''' t = time.time() + timeout while time.time() < t: if get_backend(backend=backend, socket=socket)[server]["status"].lower() == value.lower(): return True return False def get_backend(backend, socket=DEFAULT_SOCKET_URL): ''' Receive information about a specific backend. backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_backend mysql ''' backend_data = list_servers(backend=backend, socket=socket).replace('\n', ' ').split(' ') result = {} # Convert given string to Integer def num(s): try: return int(s) except ValueError: return s for data in backend_data: # Check if field or server name if ":" in data: active_field = data.replace(':', '').lower() continue elif active_field.lower() == FIELD_NODE_NAME: active_server = data result[active_server] = {} continue # Format and set returned field data to active server if active_field in FIELD_NUMERIC: if data == "": result[active_server][active_field] = 0 else: result[active_server][active_field] = num(data) else: result[active_server][active_field] = data return result def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def disable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Disable server in haproxy. 
name Server to disable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.disable_server db1.example.com mysql ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.disableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results def get_weight(name, backend, socket=DEFAULT_SOCKET_URL): ''' Get server weight name Server name backend haproxy backend socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.get_weight web1.example.com www ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend) return ha_conn.sendCmd(ha_cmd) def set_weight(name, backend, weight=0, socket=DEFAULT_SOCKET_URL): ''' Set server weight name Server name backend haproxy backend weight Server Weight socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_weight web1.example.com www 13 ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.getWeight(server=name, backend=backend, weight=weight) ha_conn.sendCmd(ha_cmd) return get_weight(name, backend, socket=socket) def set_state(name, backend, state, socket=DEFAULT_SOCKET_URL): ''' Force a server's administrative state to a new state. This can be useful to disable load balancing and/or any traffic to a server. Setting the state to "ready" puts the server in normal mode, and the command is the equivalent of the "enable server" command. Setting the state to "maint" disables any traffic to the server as well as any health checks. This is the equivalent of the "disable server" command. 
Setting the mode to "drain" only removes the server from load balancing but still allows it to be checked and to accept new persistent connections. Changes are propagated to tracking servers if any. name Server name backend haproxy backend state A string of the state to set. Must be 'ready', 'drain', or 'maint' socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.set_state my_proxy_server my_backend ready ''' # Pulling this in from the latest 0.5 release which is not yet in PyPi. # https://github.com/neurogeek/haproxyctl class setServerState(haproxy.cmds.Cmd): """Set server state command.""" cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n" p_args = ['backend', 'server', 'value'] helpTxt = "Force a server's administrative state to a new state." ha_conn = _get_conn(socket) ha_cmd = setServerState(server=name, backend=backend, value=state) return ha_conn.sendCmd(ha_cmd) def show_frontends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.show_frontends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showFrontends() return ha_conn.sendCmd(ha_cmd) def list_frontends(socket=DEFAULT_SOCKET_URL): ''' List HaProxy frontends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.list_frontends ''' return show_frontends(socket=socket).split('\n') def show_backends(socket=DEFAULT_SOCKET_URL): ''' Show HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. 
code-block:: bash salt '*' haproxy.show_backends ''' ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.showBackends() return ha_conn.sendCmd(ha_cmd) def list_backends(servers=True, socket=DEFAULT_SOCKET_URL): ''' List HaProxy Backends socket haproxy stats socket, default ``/var/run/haproxy.sock`` servers list backends with servers CLI Example: .. code-block:: bash salt '*' haproxy.list_backends ''' if not servers: return show_backends(socket=socket).split('\n') else: result = {} for backend in list_backends(servers=False, socket=socket): result[backend] = get_backend(backend=backend, socket=socket) return result
saltstack/salt
salt/states/beacon.py
present
python
def present(name, save=False, **kwargs): ''' Ensure beacon is configured with the included beacon data. Args: name (str): The name of the beacon ensure is configured. save (bool): ``True`` updates the beacons.conf. Default is ``False``. Returns: dict: A dictionary of information about the results of the state Example: .. code-block:: yaml ps_beacon: beacon.present: - name: ps - save: True - enable: False - services: salt-master: running apache2: stopped ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs) beacon_data = [{k: v} for k, v in six.iteritems(kwargs)] if name in current_beacons: if beacon_data == current_beacons[name]: ret['comment'].append('Job {0} in correct state'.format(name)) else: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['beacons.modify'](name, beacon_data, **kwargs) ret['comment'].append(result['comment']) ret['changes'] = result['changes'] else: result = __salt__['beacons.modify'](name, beacon_data, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: if 'changes' in result: ret['comment'].append('Modifying {0} in beacons'.format(name)) ret['changes'] = result['changes'] else: ret['comment'].append(result['comment']) else: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['beacons.add'](name, beacon_data, **kwargs) ret['comment'].append(result['comment']) else: result = __salt__['beacons.add'](name, beacon_data, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: ret['comment'].append('Adding {0} to beacons'.format(name)) if save: __salt__['beacons.save'](**kwargs) ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret
Ensure beacon is configured with the included beacon data. Args: name (str): The name of the beacon ensure is configured. save (bool): ``True`` updates the beacons.conf. Default is ``False``. Returns: dict: A dictionary of information about the results of the state Example: .. code-block:: yaml ps_beacon: beacon.present: - name: ps - save: True - enable: False - services: salt-master: running apache2: stopped
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/beacon.py#L80-L160
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Management of the Salt beacons ============================== .. versionadded:: 2015.8.0 .. code-block:: yaml ps: beacon.present: - save: True - enable: False - services: salt-master: running apache2: stopped sh: beacon.present: [] load: beacon.present: - averages: 1m: - 0.0 - 2.0 5m: - 0.0 - 1.5 15m: - 0.1 - 1.0 .. versionadded:: Neon Beginning in the Neon release, multiple copies of a beacon can be configured using the ``beacon_module`` parameter. inotify_infs: beacon.present: - save: True - enable: True - files: /etc/infs.conf: mask: - create - delete - modify recurse: True auto_add: True - interval: 10 - beacon_module: inotify - disable_during_state_run: True inotify_ntp: beacon.present: - save: True - enable: True - files: /etc/ntp.conf: mask: - create - delete - modify recurse: True auto_add: True - interval: 10 - beacon_module: inotify - disable_during_state_run: True ''' from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six import logging log = logging.getLogger(__name__) def absent(name, save=False, **kwargs): ''' Ensure beacon is absent. Args: name (str): The name of the beacon ensured absent. save (bool): ``True`` updates the beacons.conf file. Default is ``False``. Returns: dict: A dictionary containing the results of the state run Example: .. 
code-block:: yaml remove_beacon: beacon.absent: - name: ps - save: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs) if name in current_beacons: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['beacons.delete'](name, **kwargs) ret['comment'].append(result['comment']) else: result = __salt__['beacons.delete'](name, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: ret['comment'].append('Removed {0} from beacons'.format(name)) else: ret['comment'].append('{0} not configured in beacons'.format(name)) if save: __salt__['beacons.save'](**kwargs) ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret def enabled(name, **kwargs): ''' Enable a beacon. Args: name (str): The name of the beacon to enable. Returns: dict: A dictionary containing the results of the state run Example: .. code-block:: yaml enable_beacon: beacon.enabled: - name: ps ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs) if name in current_beacons: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['beacons.enable_beacon'](name, **kwargs) ret['comment'].append(result['comment']) else: result = __salt__['beacons.enable_beacon'](name, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: ret['comment'].append('Enabled {0} from beacons'.format(name)) else: ret['comment'].append('{0} not a configured beacon'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret def disabled(name, **kwargs): ''' Disable a beacon. Args: name (str): The name of the beacon to disable. Returns: dict: A dictionary containing the results of the state run Example: .. 
code-block:: yaml disable_beacon: beacon.disabled: - name: ps ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs) if name in current_beacons: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['beacons.disable_beacon'](name, **kwargs) ret['comment'].append(result['comment']) else: result = __salt__['beacons.disable_beacon'](name, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: ret['comment'].append('Disabled beacon {0}.'.format(name)) else: ret['comment'].append('Job {0} is not configured.'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret
saltstack/salt
salt/modules/netscaler.py
_connect
python
def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro
Initialise netscaler connection
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L89-L132
[ "def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
_servicegroup_get
python
def _servicegroup_get(sg_name, **connection_args):
    '''
    Look up a NetScaler service group by name.

    Opens a Nitro session, fetches the NSServiceGroup resource named
    *sg_name*, and closes the session again.

    Returns the service group resource, or None when the connection
    cannot be established or the lookup fails.
    '''
    nitro = _connect(**connection_args)
    if nitro is None:
        # Could not log in to the NetScaler appliance.
        return None

    group = NSServiceGroup()
    group.set_servicegroupname(sg_name)
    try:
        group = NSServiceGroup.get(nitro, group)
    except NSNitroError as error:
        log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error)
        group = None

    _disconnect(nitro)
    return group
Return a service group resource or None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L144-L159
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if 
nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
_servicegroup_get_servers
python
def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg
Returns a list of members of a servicegroup or None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L162-L177
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
_servicegroup_get_server
python
def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret
Returns a member of a service group or None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L180-L193
[ "def _servicegroup_get_servers(sg_name, **connection_args):\n '''\n Returns a list of members of a servicegroup or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n sg = NSServiceGroup()\n sg.set_servicegroupname(sg_name)\n try:\n sg = NSServiceGroup.get_servers(nitro, sg)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error)\n sg = None\n _disconnect(nitro)\n return sg\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_exists
python
def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True
Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L196-L211
[ "def _servicegroup_get(sg_name, **connection_args):\n '''\n Return a service group ressource or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n sg = NSServiceGroup()\n sg.set_servicegroupname(sg_name)\n try:\n sg = NSServiceGroup.get(nitro, sg)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error)\n sg = None\n _disconnect(nitro)\n return sg\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_add
python
def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret
Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L214-L242
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def servicegroup_exists(sg_name, sg_type=None, **connection_args):\n '''\n Checks if a service group exists\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' netscaler.servicegroup_exists 'serviceGroupName'\n '''\n sg = _servicegroup_get(sg_name, **connection_args)\n if sg is None:\n return False\n if sg_type is not None and sg_type.upper() != sg.get_servicetype():\n return False\n return True\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_delete
python
def servicegroup_delete(sg_name, **connection_args):
    '''
    Delete a service group

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.servicegroup_delete 'serviceGroupName'
    '''
    ret = True
    # Look the group up first: if it does not exist there is nothing to
    # delete, and NSServiceGroup.delete() needs the resource object anyway.
    sg = _servicegroup_get(sg_name, **connection_args)
    if sg is None:
        return False
    nitro = _connect(**connection_args)
    if nitro is None:
        return False
    try:
        NSServiceGroup.delete(nitro, sg)
    except NSNitroError as error:
        # Best-effort: log the NITRO failure and report False to the caller.
        log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error)
        ret = False
    _disconnect(nitro)
    return ret
Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L245-L268
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _servicegroup_get(sg_name, **connection_args):\n '''\n Return a service group ressource or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n sg = NSServiceGroup()\n sg.set_servicegroupname(sg_name)\n try:\n sg = NSServiceGroup.get(nitro, sg)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error)\n sg = None\n _disconnect(nitro)\n return sg\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_server_exists
python
def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args):
    '''
    Check if a server:port combination is a member of a servicegroup

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort'
    '''
    # Membership is simply "the lookup helper found a matching binding".
    member = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args)
    return member is not None
Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L271-L281
[ "def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args):\n '''\n Returns a member of a service group or None\n '''\n ret = None\n servers = _servicegroup_get_servers(sg_name, **connection_args)\n if servers is None:\n return None\n for server in servers:\n if server.get_servername() == s_name:\n if s_port is not None and s_port != server.get_port():\n ret = None\n ret = server\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_server_up
python
def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP'
Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L284-L295
[ "def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args):\n '''\n Returns a member of a service group or None\n '''\n ret = None\n servers = _servicegroup_get_servers(sg_name, **connection_args)\n if servers is None:\n return None\n for server in servers:\n if server.get_servername() == s_name:\n if s_port is not None and s_port != server.get_port():\n ret = None\n ret = server\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_server_enable
python
def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret
Enable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L298-L321
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args):\n '''\n Returns a member of a service group or None\n '''\n ret = None\n servers = _servicegroup_get_servers(sg_name, **connection_args)\n if servers is None:\n return None\n for server in servers:\n if server.get_servername() == s_name:\n if s_port is not None and s_port != server.get_port():\n ret = None\n ret = server\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
servicegroup_server_add
python
def servicegroup_server_add(sg_name, s_name, s_port, **connection_args):
    '''
    Add a server:port member to a servicegroup

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort'
    '''
    # Nitro raises an error if the member is already bound, so check first
    # and report failure instead of attempting a duplicate add.
    if _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None:
        return False
    nitro = _connect(**connection_args)
    if nitro is None:
        return False
    binding = NSServiceGroupServerBinding()
    binding.set_servicegroupname(sg_name)
    binding.set_servername(s_name)
    binding.set_port(s_port)
    result = True
    try:
        NSServiceGroupServerBinding.add(nitro, binding)
    except NSNitroError as error:
        log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error)
        result = False
    _disconnect(nitro)
    return result
Add a server:port member to a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L350-L378
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args):\n '''\n Returns a member of a service group or None\n '''\n ret = None\n servers = _servicegroup_get_servers(sg_name, **connection_args)\n if servers is None:\n return None\n for server in servers:\n if server.get_servername() == s_name:\n if s_port is not None and s_port != server.get_port():\n ret = None\n ret = server\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
_service_get
python
def _service_get(s_name, **connection_args):
    '''
    Look up the NSService resource named ``s_name``.

    Returns the service object, or None when the connection or the
    lookup fails.
    '''
    nitro = _connect(**connection_args)
    if nitro is None:
        return None
    query = NSService()
    query.set_name(s_name)
    try:
        result = NSService.get(nitro, query)
    except NSNitroError as error:
        log.debug('netscaler module error - NSService.get() failed: %s', error)
        result = None
    _disconnect(nitro)
    return result
Returns a service ressource or None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L412-L427
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def service_exists(s_name, **connection_args): ''' Checks if a service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. 
code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. 
code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. 
code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
service_up
python
def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP'
Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L443-L454
[ "def _service_get(s_name, **connection_args):\n '''\n Returns a service ressource or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n service = NSService()\n service.set_name(s_name)\n try:\n service = NSService.get(nitro, service)\n except NSNitroError as error:\n log.debug('netscaler module error - NSService.get() failed: %s', error)\n service = None\n _disconnect(nitro)\n return service\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. 
code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. 
code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
service_enable
python
def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret
Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L457-L481
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _service_get(s_name, **connection_args):\n '''\n Returns a service ressource or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n service = NSService()\n service.set_name(s_name)\n try:\n service = NSService.get(nitro, service)\n except NSNitroError as error:\n log.debug('netscaler module error - NSService.get() failed: %s', error)\n service = None\n _disconnect(nitro)\n return service\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
service_disable
python
def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret
Disable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L484-L510
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _service_get(s_name, **connection_args):\n '''\n Returns a service ressource or None\n '''\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n service = NSService()\n service.set_name(s_name)\n try:\n service = NSService.get(nitro, service)\n except NSNitroError as error:\n log.debug('netscaler module error - NSService.get() failed: %s', error)\n service = None\n _disconnect(nitro)\n return service\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_exists
python
def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True
Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L528-L545
[ "def _server_get(s_name, **connection_args):\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n server = NSServer()\n server.set_name(s_name)\n try:\n server = NSServer.get(nitro, server)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServer.get() failed: %s', error)\n server = None\n _disconnect(nitro)\n return server\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. 
code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_add
python
def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret
Add a server Note: The default server state is ENABLED CLI Example: .. code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L548-L577
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def server_exists(s_name, ip=None, s_state=None, **connection_args):\n '''\n Checks if a server exists\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' netscaler.server_exists 'serverName'\n '''\n server = _server_get(s_name, **connection_args)\n if server is None:\n return False\n if ip is not None and ip != server.get_ipaddress():\n return False\n if s_state is not None and s_state.upper() != server.get_state():\n return False\n return True\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. 
code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_delete
python
def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L580-L603
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _server_get(s_name, **connection_args):\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n server = NSServer()\n server.set_name(s_name)\n try:\n server = NSServer.get(nitro, server)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServer.get() failed: %s', error)\n server = None\n _disconnect(nitro)\n return server\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_update
python
def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret
Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L606-L639
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _server_get(s_name, **connection_args):\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n server = NSServer()\n server.set_name(s_name)\n try:\n server = NSServer.get(nitro, server)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServer.get() failed: %s', error)\n server = None\n _disconnect(nitro)\n return server\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_enabled
python
def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED'
Check if a server is enabled globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enabled 'serverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L642-L653
[ "def _server_get(s_name, **connection_args):\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n server = NSServer()\n server.set_name(s_name)\n try:\n server = NSServer.get(nitro, server)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServer.get() failed: %s', error)\n server = None\n _disconnect(nitro)\n return server\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
server_enable
python
def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret
Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L656-L681
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _server_get(s_name, **connection_args):\n nitro = _connect(**connection_args)\n if nitro is None:\n return None\n server = NSServer()\n server.set_name(s_name)\n try:\n server = NSServer.get(nitro, server)\n except NSNitroError as error:\n log.debug('netscaler module error - NSServer.get() failed: %s', error)\n server = None\n _disconnect(nitro)\n return server\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
vserver_exists
python
def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True
Checks if a vserver exists CLI Example: .. code-block:: bash salt '*' netscaler.vserver_exists 'vserverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L727-L746
[ "def _vserver_get(v_name, **connection_args):\n nitro = _connect(**connection_args)\n vserver = NSLBVServer()\n vserver.set_name(v_name)\n if nitro is None:\n return None\n try:\n vserver = NSLBVServer.get(nitro, vserver)\n except NSNitroError as error:\n log.debug('netscaler module error - NSLBVServer.get() failed: %s', error)\n vserver = None\n _disconnect(nitro)\n return vserver\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
vserver_add
python
def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret
Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L749-L777
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args):\n '''\n Checks if a vserver exists\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' netscaler.vserver_exists 'vserverName'\n '''\n vserver = _vserver_get(v_name, **connection_args)\n if vserver is None:\n return False\n if v_ip is not None and vserver.get_ipv46() != v_ip:\n return False\n if v_port is not None and vserver.get_port() != v_port:\n return False\n if v_type is not None and vserver.get_servicetype().upper() != v_type.upper():\n return False\n return True\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
vserver_delete
python
def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
Delete a lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_delete 'vserverName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L780-L803
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def _vserver_get(v_name, **connection_args):\n nitro = _connect(**connection_args)\n vserver = NSLBVServer()\n vserver.set_name(v_name)\n if nitro is None:\n return None\n try:\n vserver = NSLBVServer.get(nitro, vserver)\n except NSNitroError as error:\n log.debug('netscaler module error - NSLBVServer.get() failed: %s', error)\n vserver = None\n _disconnect(nitro)\n return vserver\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for 
sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
vserver_servicegroup_add
python
def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret
Bind a servicegroup to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L838-L863
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def vserver_servicegroup_exists(v_name, sg_name, **connection_args):\n '''\n Checks if a servicegroup is tied to a vserver\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName'\n '''\n return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/netscaler.py
vserver_sslcert_add
python
def vserver_sslcert_add(v_name, sc_name, **connection_args): ''' Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName' ''' ret = True if vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.add(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret
Binds a SSL certificate to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_add 'vserverName' 'sslCertificateName'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L925-L950
[ "def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro\n", "def _disconnect(nitro):\n try:\n nitro.logout()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.logout() failed: %s', error)\n return None\n return nitro\n", "def vserver_sslcert_exists(v_name, sc_name, **connection_args):\n '''\n Checks if a SSL certificate is tied to a vserver\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName'\n '''\n return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None\n" ]
# -*- coding: utf-8 -*- ''' Module to provide Citrix Netscaler compatibility to Salt (compatible with netscaler 9.2+) .. versionadded:: 2015.2.0 :depends: - nsnitro Python module .. note:: You can install nsnitro using: .. code-block:: bash pip install nsnitro :configuration: This module accepts connection configuration details either as parameters, or as configuration settings in /etc/salt/minion on the relevant minions .. code-block:: yaml netscaler.host: 1.2.3.4 netscaler.user: user netscaler.pass: password This data can also be passed into pillar. Options passed into opts will overwrite options passed into pillar. :CLI Examples: Calls relying on configuration passed using /etc/salt/minion, grains, or pillars: .. code-block:: bash salt-call netscaler.server_exists server_name Calls passing configuration as opts .. code-block:: bash salt-call netscaler.server_exists server_name netscaler_host=1.2.3.4 netscaler_user=username netscaler_pass=password salt-call netscaler.server_exists server_name netscaler_host=1.2.3.5 netscaler_user=username2 netscaler_pass=password2 salt-call netscaler.server_enable server_name2 netscaler_host=1.2.3.5 salt-call netscaler.server_up server_name3 netscaler_host=1.2.3.6 netscaler_useSSL=False ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.platform try: from nsnitro.nsnitro import NSNitro from nsnitro.nsexceptions import NSNitroError from nsnitro.nsresources.nsserver import NSServer from nsnitro.nsresources.nsservice import NSService from nsnitro.nsresources.nsservicegroup import NSServiceGroup from nsnitro.nsresources.nsservicegroupserverbinding import NSServiceGroupServerBinding from nsnitro.nsresources.nslbvserver import NSLBVServer from nsnitro.nsresources.nslbvserverservicegroupbinding import NSLBVServerServiceGroupBinding from nsnitro.nsresources.nssslvserversslcertkeybinding import NSSSLVServerSSLCertKeyBinding HAS_NSNITRO = 
True except ImportError: HAS_NSNITRO = False log = logging.getLogger(__name__) def __virtual__(): ''' Only load this module if the nsnitro library is installed ''' if salt.utils.platform.is_windows(): return ( False, 'The netscaler execution module failed to load: not available ' 'on Windows.' ) if HAS_NSNITRO: return 'netscaler' return ( False, 'The netscaler execution module failed to load: the nsnitro python ' 'library is not available.' ) def _connect(**kwargs): ''' Initialise netscaler connection ''' connargs = dict() # Shamelessy ripped from the mysql module def __connarg(name, key=None, default=None): ''' Add key to connargs, only if name exists in our kwargs or as netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs, opts then pillar. To avoid collision with other functions, kwargs-based connection arguments are prefixed with 'netscaler_' (i.e. 'netscaler_host', 'netscaler_user', etc.). ''' if key is None: key = name if name in kwargs: connargs[key] = kwargs[name] else: prefix = 'netscaler_' if name.startswith(prefix): try: name = name[len(prefix):] except IndexError: return val = __salt__['config.option']('netscaler.{0}'.format(name), None) if val is not None: connargs[key] = val elif default is not None: connargs[key] = default __connarg('netscaler_host', 'host') __connarg('netscaler_user', 'user') __connarg('netscaler_pass', 'pass') __connarg('netscaler_useSSL', 'useSSL', True) nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL']) try: nitro.login() except NSNitroError as error: log.debug('netscaler module error - NSNitro.login() failed: %s', error) return None return nitro def _disconnect(nitro): try: nitro.logout() except NSNitroError as error: log.debug('netscaler module error - NSNitro.logout() failed: %s', error) return None return nitro def _servicegroup_get(sg_name, **connection_args): ''' Return a service group ressource or None ''' nitro = _connect(**connection_args) if nitro is None: 
return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get() failed: %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_servers(sg_name, **connection_args): ''' Returns a list of members of a servicegroup or None ''' nitro = _connect(**connection_args) if nitro is None: return None sg = NSServiceGroup() sg.set_servicegroupname(sg_name) try: sg = NSServiceGroup.get_servers(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error) sg = None _disconnect(nitro) return sg def _servicegroup_get_server(sg_name, s_name, s_port=None, **connection_args): ''' Returns a member of a service group or None ''' ret = None servers = _servicegroup_get_servers(sg_name, **connection_args) if servers is None: return None for server in servers: if server.get_servername() == s_name: if s_port is not None and s_port != server.get_port(): ret = None ret = server return ret def servicegroup_exists(sg_name, sg_type=None, **connection_args): ''' Checks if a service group exists CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_exists 'serviceGroupName' ''' sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False if sg_type is not None and sg_type.upper() != sg.get_servicetype(): return False return True def servicegroup_add(sg_name, sg_type='HTTP', **connection_args): ''' Add a new service group If no service type is specified, HTTP will be used. Most common service types: HTTP, SSL, and SSL_BRIDGE CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_add 'serviceGroupName' salt '*' netscaler.servicegroup_add 'serviceGroupName' 'serviceGroupType' ''' ret = True if servicegroup_exists(sg_name): return False nitro = _connect(**connection_args) if nitro is None: return False sg = NSServiceGroup() sg.set_servicegroupname(sg_name) sg.set_servicetype(sg_type.upper()) try: NSServiceGroup.add(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.add() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_delete(sg_name, **connection_args): ''' Delete a new service group CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_delete 'serviceGroupName' ''' ret = True sg = _servicegroup_get(sg_name, **connection_args) if sg is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.delete(nitro, sg) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args): ''' Check if a server:port combination is a member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort' ''' return _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) is not None def servicegroup_server_up(sg_name, s_name, s_port, **connection_args): ''' Check if a server:port combination is in state UP in a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_up 'serviceGroupName' 'serverName' 'serverPort' ''' server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) return server is not None and server.get_svrstate() == 'UP' def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args): ''' Enable a server:port member of a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.enable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_disable(sg_name, s_name, s_port, **connection_args): ''' Disable a server:port member of a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_disable 'serviceGroupName' 'serverName' 'serverPort' ''' ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServiceGroup.disable_server(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroup.disable_server() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_add(sg_name, s_name, s_port, **connection_args): ''' Add a server:port member to a servicegroup CLI Example: .. 
code-block:: bash salt '*' netscaler.servicegroup_server_add 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is not None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.add(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def servicegroup_server_delete(sg_name, s_name, s_port, **connection_args): ''' Remove a server:port member from a servicegroup CLI Example: .. code-block:: bash salt '*' netscaler.servicegroup_server_delete 'serviceGroupName' 'serverName' 'serverPort' ''' # Nitro will throw an error if the server is already not present ret = True server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False sgsb = NSServiceGroupServerBinding() sgsb.set_servicegroupname(sg_name) sgsb.set_servername(s_name) sgsb.set_port(s_port) try: NSServiceGroupServerBinding.delete(nitro, sgsb) except NSNitroError as error: log.debug('netscaler module error - NSServiceGroupServerBinding() failed: %s', error) ret = False _disconnect(nitro) return ret def _service_get(s_name, **connection_args): ''' Returns a service ressource or None ''' nitro = _connect(**connection_args) if nitro is None: return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: %s', error) service = None _disconnect(nitro) return service def service_exists(s_name, **connection_args): ''' Checks if a 
service exists CLI Example: .. code-block:: bash salt '*' netscaler.service_exists 'serviceName' ''' return _service_get(s_name, **connection_args) is not None def service_up(s_name, **connection_args): ''' Checks if a service is UP CLI Example: .. code-block:: bash salt '*' netscaler.service_up 'serviceName' ''' service = _service_get(s_name, **connection_args) return service is not None and service.get_svrstate() == 'UP' def service_enable(s_name, **connection_args): ''' Enable a service CLI Example: .. code-block:: bash salt '*' netscaler.service_enable 'serviceName' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSService.enable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def service_disable(s_name, s_delay=None, **connection_args): ''' Disable a service CLI Example: .. 
code-block:: bash salt '*' netscaler.service_disable 'serviceName' salt '*' netscaler.service_disable 'serviceName' 'delayInSeconds' ''' ret = True service = _service_get(s_name, **connection_args) if service is None: return False if s_delay is not None: service.set_delay(s_delay) nitro = _connect(**connection_args) if nitro is None: return False try: NSService.disable(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def _server_get(s_name, **connection_args): nitro = _connect(**connection_args) if nitro is None: return None server = NSServer() server.set_name(s_name) try: server = NSServer.get(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.get() failed: %s', error) server = None _disconnect(nitro) return server def server_exists(s_name, ip=None, s_state=None, **connection_args): ''' Checks if a server exists CLI Example: .. code-block:: bash salt '*' netscaler.server_exists 'serverName' ''' server = _server_get(s_name, **connection_args) if server is None: return False if ip is not None and ip != server.get_ipaddress(): return False if s_state is not None and s_state.upper() != server.get_state(): return False return True def server_add(s_name, s_ip, s_state=None, **connection_args): ''' Add a server Note: The default server state is ENABLED CLI Example: .. 
code-block:: bash salt '*' netscaler.server_add 'serverName' 'serverIpAddress' salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' ''' ret = True if server_exists(s_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False server = NSServer() server.set_name(s_name) server.set_ipaddress(s_ip) if s_state is not None: server.set_state(s_state) try: NSServer.add(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def server_delete(s_name, **connection_args): ''' Delete a server CLI Example: .. code-block:: bash salt '*' netscaler.server_delete 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.delete(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def server_update(s_name, s_ip, **connection_args): ''' Update a server's attributes CLI Example: .. code-block:: bash salt '*' netscaler.server_update 'serverName' 'serverIP' ''' altered = False cur_server = _server_get(s_name, **connection_args) if cur_server is None: return False alt_server = NSServer() alt_server.set_name(s_name) if cur_server.get_ipaddress() != s_ip: alt_server.set_ipaddress(s_ip) altered = True # Nothing to update, the server is already idem if altered is False: return False # Perform the update nitro = _connect(**connection_args) if nitro is None: return False ret = True try: NSServer.update(nitro, alt_server) except NSNitroError as error: log.debug('netscaler module error - NSServer.update() failed: %s', error) ret = False _disconnect(nitro) return ret def server_enabled(s_name, **connection_args): ''' Check if a server is enabled globally CLI Example: .. 
code-block:: bash salt '*' netscaler.server_enabled 'serverName' ''' server = _server_get(s_name, **connection_args) return server is not None and server.get_state() == 'ENABLED' def server_enable(s_name, **connection_args): ''' Enables a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_enable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'ENABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.enable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.enable() failed: %s', error) ret = False _disconnect(nitro) return ret def server_disable(s_name, **connection_args): ''' Disable a server globally CLI Example: .. code-block:: bash salt '*' netscaler.server_disable 'serverName' ''' ret = True server = _server_get(s_name, **connection_args) if server is None: return False if server.get_state() == 'DISABLED': return True nitro = _connect(**connection_args) if nitro is None: return False try: NSServer.disable(nitro, server) except NSNitroError as error: log.debug('netscaler module error - NSServer.disable() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_get(v_name, **connection_args): nitro = _connect(**connection_args) vserver = NSLBVServer() vserver.set_name(v_name) if nitro is None: return None try: vserver = NSLBVServer.get(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.get() failed: %s', error) vserver = None _disconnect(nitro) return vserver def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args): ''' Checks if a vserver exists CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_exists 'vserverName' ''' vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False if v_ip is not None and vserver.get_ipv46() != v_ip: return False if v_port is not None and vserver.get_port() != v_port: return False if v_type is not None and vserver.get_servicetype().upper() != v_type.upper(): return False return True def vserver_add(v_name, v_ip, v_port, v_type, **connection_args): ''' Add a new lb vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_add 'vserverName' 'vserverIP' 'vserverPort' 'vserverType' salt '*' netscaler.vserver_add 'alex.patate.chaude.443' '1.2.3.4' '443' 'SSL' ''' ret = True if vserver_exists(v_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vserver = NSLBVServer() vserver.set_name(v_name) vserver.set_ipv46(v_ip) vserver.set_port(v_port) vserver.set_servicetype(v_type.upper()) try: NSLBVServer.add(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSLBVServer.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_delete(v_name, **connection_args): ''' Delete a lb vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_delete 'vserverName' ''' ret = True vserver = _vserver_get(v_name, **connection_args) if vserver is None: return False nitro = _connect(**connection_args) if nitro is None: return False try: NSLBVServer.delete(nitro, vserver) except NSNitroError as error: log.debug('netscaler module error - NSVServer.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_servicegroup_get(v_name, sg_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) try: vsgs = NSLBVServerServiceGroupBinding.get(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.get() failed: %s', error) return None for vsg in vsgs: if vsg.get_servicegroupname() == sg_name: ret = vsg _disconnect(nitro) return ret def vserver_servicegroup_exists(v_name, sg_name, **connection_args): ''' Checks if a servicegroup is tied to a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_exists 'vserverName' 'serviceGroupName' ''' return _vserver_servicegroup_get(v_name, sg_name, **connection_args) is not None def vserver_servicegroup_add(v_name, sg_name, **connection_args): ''' Bind a servicegroup to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_servicegroup_add 'vserverName' 'serviceGroupName' ''' ret = True if vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.add(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.add() failed: %s', error) ret = False _disconnect(nitro) return ret def vserver_servicegroup_delete(v_name, sg_name, **connection_args): ''' Unbind a servicegroup from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_servicegroup_delete 'vserverName' 'serviceGroupName' ''' ret = True if not vserver_servicegroup_exists(v_name, sg_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False vsg = NSLBVServerServiceGroupBinding() vsg.set_name(v_name) vsg.set_servicegroupname(sg_name) try: NSLBVServerServiceGroupBinding.delete(nitro, vsg) except NSNitroError as error: log.debug('netscaler module error - NSLBVServerServiceGroupBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret def _vserver_sslcert_get(v_name, sc_name, **connection_args): ret = None nitro = _connect(**connection_args) if nitro is None: return None sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) try: sslcerts = NSSSLVServerSSLCertKeyBinding.get(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.get() failed: %s', error) return None for sslcert in sslcerts: if sslcert.get_certkeyname() == sc_name: ret = sslcert return ret def vserver_sslcert_exists(v_name, sc_name, **connection_args): ''' Checks if a SSL certificate is tied to a vserver CLI Example: .. 
code-block:: bash salt '*' netscaler.vserver_sslcert_exists 'vserverName' 'sslCertificateName' ''' return _vserver_sslcert_get(v_name, sc_name, **connection_args) is not None def vserver_sslcert_delete(v_name, sc_name, **connection_args): ''' Unbinds a SSL certificate from a vserver CLI Example: .. code-block:: bash salt '*' netscaler.vserver_sslcert_delete 'vserverName' 'sslCertificateName' ''' ret = True if not vserver_sslcert_exists(v_name, sc_name, **connection_args): return False nitro = _connect(**connection_args) if nitro is None: return False sslcert = NSSSLVServerSSLCertKeyBinding() sslcert.set_vservername(v_name) sslcert.set_certkeyname(sc_name) try: NSSSLVServerSSLCertKeyBinding.delete(nitro, sslcert) except NSNitroError as error: log.debug('netscaler module error - NSSSLVServerSSLCertKeyBinding.delete() failed: %s', error) ret = False _disconnect(nitro) return ret
saltstack/salt
salt/modules/libcloud_dns.py
list_zones
python
def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()]
List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L103-L117
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. 
www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
list_records
python
def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)]
List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L120-L144
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
get_zone
python
def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id))
Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L147-L164
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n", "def _simple_zone(zone):\n return {\n 'id': zone.id,\n 'domain': zone.domain,\n 'type': zone.type,\n 'ttl': zone.ttl,\n 'extra': zone.extra\n }\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). 
:type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
get_record
python
def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id))
Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L167-L187
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n", "def _simple_record(record):\n return {\n 'id': record.id,\n 'name': record.name,\n 'type': record.type,\n 'data': record.data,\n 'zone': _simple_zone(record.zone),\n 'ttl': record.ttl,\n 'extra': record.extra\n }\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). 
:type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
create_zone
python
def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone)
Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L190-L214
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n", "def _simple_zone(zone):\n return {\n 'id': zone.id,\n 'domain': zone.domain,\n 'type': zone.type,\n 'ttl': zone.ttl,\n 'extra': zone.extra\n }\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
update_zone
python
def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl))
Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L217-L244
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n", "def _simple_zone(zone):\n return {\n 'id': zone.id,\n 'domain': zone.domain,\n 'type': zone.type,\n 'ttl': zone.ttl,\n 'extra': zone.extra\n }\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
create_record
python
def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data))
Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L247-L278
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n", "def _simple_record(record):\n return {\n 'id': record.id,\n 'name': record.name,\n 'type': record.type,\n 'data': record.data,\n 'zone': _simple_zone(record.zone),\n 'ttl': record.ttl,\n 'extra': record.extra\n }\n", "def _string_to_record_type(string):\n '''\n Return a string representation of a DNS record type to a\n libcloud RecordType ENUM.\n\n :param string: A record type, e.g. A, TXT, NS\n :type string: ``str``\n\n :rtype: :class:`RecordType`\n '''\n string = string.upper()\n record_type = getattr(RecordType, string)\n return record_type\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
delete_zone
python
def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone)
Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L281-L301
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
delete_record
python
def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record)
Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L304-L327
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
get_bind_data
python
def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone)
Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L330-L351
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
extra
python
def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs)
Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L354-L376
[ "def _get_driver(profile):\n config = __salt__['config.option']('libcloud_dns')[profile]\n cls = get_driver(config['driver'])\n args = config.copy()\n del args['driver']\n args['key'] = config.get('key')\n args['secret'] = config.get('secret', None)\n args['secure'] = config.get('secure', True)\n args['host'] = config.get('host', None)\n args['port'] = config.get('port', None)\n return cls(**args)\n" ]
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/libcloud_dns.py
_string_to_record_type
python
def _string_to_record_type(string): ''' Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType` ''' string = string.upper() record_type = getattr(RecordType, string) return record_type
Return a string representation of a DNS record type to a libcloud RecordType ENUM. :param string: A record type, e.g. A, TXT, NS :type string: ``str`` :rtype: :class:`RecordType`
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_dns.py#L379-L391
null
# -*- coding: utf-8 -*- ''' Apache Libcloud DNS Management ============================== Connection module for Apache Libcloud DNS management .. versionadded:: 2016.11.0 :configuration: This module uses a configuration profile for one or multiple DNS providers .. code-block:: yaml libcloud_dns: profile_test1: driver: cloudflare key: 12345 secret: mysecret profile_test2: driver: godaddy key: 12345 secret: mysecret shopper_id: 12345 :depends: apache-libcloud ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging # Import salt libs import salt.utils.compat from salt.utils.versions import LooseVersion as _LooseVersion log = logging.getLogger(__name__) # Import third party libs REQUIRED_LIBCLOUD_VERSION = '2.0.0' try: #pylint: disable=unused-import import libcloud from libcloud.dns.providers import get_driver from libcloud.dns.types import RecordType #pylint: enable=unused-import if hasattr(libcloud, '__version__') and _LooseVersion(libcloud.__version__) < _LooseVersion(REQUIRED_LIBCLOUD_VERSION): raise ImportError() logging.getLogger('libcloud').setLevel(logging.CRITICAL) HAS_LIBCLOUD = True except ImportError: HAS_LIBCLOUD = False def __virtual__(): ''' Only load if libcloud libraries exist. 
''' if not HAS_LIBCLOUD: msg = ('A apache-libcloud library with version at least {0} was not ' 'found').format(REQUIRED_LIBCLOUD_VERSION) return (False, msg) return True def __init__(opts): salt.utils.compat.pack_dunder(__name__) def _get_driver(profile): config = __salt__['config.option']('libcloud_dns')[profile] cls = get_driver(config['driver']) args = config.copy() del args['driver'] args['key'] = config.get('key') args['secret'] = config.get('secret', None) args['secure'] = config.get('secure', True) args['host'] = config.get('host', None) args['port'] = config.get('port', None) return cls(**args) def list_record_types(profile): ''' List available record types for the given profile, e.g. A, AAAA :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_record_types profile1 ''' conn = _get_driver(profile=profile) return conn.list_record_types() def list_zones(profile): ''' List zones for the given profile :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_zones profile1 ''' conn = _get_driver(profile=profile) return [_simple_zone(zone) for zone in conn.list_zones()] def list_records(zone_id, profile, type=None): ''' List records for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :param type: The record type, e.g. A, NS :type type: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.list_records google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) if type is not None: return [_simple_record(record) for record in conn.list_records(zone) if record.type == type] else: return [_simple_record(record) for record in conn.list_records(zone)] def get_zone(zone_id, profile): ''' Get zone information for the given zone_id on the given profile :param zone_id: Zone to export. 
:type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_zone google.com profile1 ''' conn = _get_driver(profile=profile) return _simple_zone(conn.get_zone(zone_id)) def get_record(zone_id, record_id, profile): ''' Get record information for the given zone_id on the given profile :param zone_id: Zone to export. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_record google.com www profile1 ''' conn = _get_driver(profile=profile) return _simple_record(conn.get_record(zone_id, record_id)) def create_zone(domain, profile, type='master', ttl=None): ''' Create a new zone. :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.create_record(domain, type=type, ttl=ttl) return _simple_zone(zone) def update_zone(zone_id, domain, profile, type='master', ttl=None): ''' Update an existing zone. :param zone_id: Zone ID to update. :type zone_id: ``str`` :param domain: Zone domain name (e.g. example.com) :type domain: ``str`` :param profile: The profile key :type profile: ``str`` :param type: Zone type (master / slave). :type type: ``str`` :param ttl: TTL for new records. (optional) :type ttl: ``int`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.update_zone google.com google.com profile1 type=slave ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return _simple_zone(conn.update_zone(zone=zone, domain=domain, type=type, ttl=ttl)) def create_record(name, zone_id, type, data, profile): ''' Create a new record. :param name: Record name without the domain name (e.g. www). Note: If you want to create a record for a base domain name, you should specify empty string ('') for this argument. :type name: ``str`` :param zone_id: Zone where the requested record is created. :type zone_id: ``str`` :param type: DNS record type (A, AAAA, ...). :type type: ``str`` :param data: Data for the record (depends on the record type). :type data: ``str`` :param profile: The profile key :type profile: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.create_record www google.com A 12.32.12.2 profile1 ''' conn = _get_driver(profile=profile) record_type = _string_to_record_type(type) zone = conn.get_zone(zone_id) return _simple_record(conn.create_record(name, zone, record_type, data)) def delete_zone(zone_id, profile): ''' Delete a zone. :param zone_id: Zone to delete. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.delete_zone google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id=zone_id) return conn.delete_zone(zone) def delete_record(zone_id, record_id, profile): ''' Delete a record. :param zone_id: Zone to delete. :type zone_id: ``str`` :param record_id: Record to delete. :type record_id: ``str`` :param profile: The profile key :type profile: ``str`` :rtype: ``bool`` CLI Example: .. 
code-block:: bash salt myminion libcloud_dns.delete_record google.com www profile1 ''' conn = _get_driver(profile=profile) record = conn.get_record(zone_id=zone_id, record_id=record_id) return conn.delete_record(record) def get_bind_data(zone_id, profile): ''' Export Zone to the BIND compatible format. :param zone_id: Zone to export. :type zone_id: ``str`` :param profile: The profile key :type profile: ``str`` :return: Zone data in BIND compatible format. :rtype: ``str`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.get_bind_data google.com profile1 ''' conn = _get_driver(profile=profile) zone = conn.get_zone(zone_id) return conn.export_zone_to_bind_format(zone) def extra(method, profile, **libcloud_kwargs): ''' Call an extended method on the driver :param method: Driver's method name :type method: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_dns.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml ''' _sanitize_kwargs(libcloud_kwargs) conn = _get_driver(profile=profile) connection_method = getattr(conn, method) return connection_method(**libcloud_kwargs) def _simple_zone(zone): return { 'id': zone.id, 'domain': zone.domain, 'type': zone.type, 'ttl': zone.ttl, 'extra': zone.extra } def _simple_record(record): return { 'id': record.id, 'name': record.name, 'type': record.type, 'data': record.data, 'zone': _simple_zone(record.zone), 'ttl': record.ttl, 'extra': record.extra }
saltstack/salt
salt/modules/ifttt.py
_query
python
def _query(event=None, method='GET', args=None, header_dict=None, data=None): ''' Make a web call to IFTTT. ''' secret_key = __salt__['config.get']('ifttt.secret_key') or \ __salt__['config.get']('ifttt:secret_key') path = 'https://maker.ifttt.com/trigger/{0}/with/key/{1}'.format(event, secret_key) if header_dict is None: header_dict = {'Content-type': 'application/json'} if method != 'POST': header_dict['Accept'] = 'application/json' result = salt.utils.http.query( path, method, params={}, data=data, header_dict=header_dict, decode=True, decode_type='auto', text=True, status=True, cookies=True, persist_session=True, opts=__opts__, backend='requests' ) return result
Make a web call to IFTTT.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ifttt.py#L37-L70
null
# -*- coding: utf-8 -*- ''' Support for IFTTT .. versionadded:: 2015.8.0 Requires an ``api_key`` in ``/etc/salt/minion``: .. code-block: yaml ifttt: secret_key: '280d4699-a817-4719-ba6f-ca56e573e44f' ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time # Import salt libs import salt.utils.http import salt.utils.json log = logging.getLogger(__name__) def __virtual__(): ''' Only load the module if apache is installed ''' if not __salt__['config.get']('ifttt.secret_key') and \ not __salt__['config.get']('ifttt:secret_key'): return (False, 'IFTTT Secret Key Unavailable, not loading.') return True def trigger_event(event=None, **kwargs): ''' Trigger a configured event in IFTTT. :param event: The name of the event to trigger. :return: A dictionary with status, text, and error if result was failure. ''' res = {'result': False, 'message': 'Something went wrong'} data = {} for value in ('value1', 'value2', 'value3', 'Value1', 'Value2', 'Value3'): if value in kwargs: data[value.lower()] = kwargs[value] data['occurredat'] = time.strftime("%B %d, %Y %I:%M%p", time.localtime()) result = _query(event=event, method='POST', data=salt.utils.json.dumps(data) ) if 'status' in result: if result['status'] == 200: res['result'] = True res['message'] = result['text'] else: if 'error' in result: res['message'] = result['error'] return res
saltstack/salt
salt/modules/ifttt.py
trigger_event
python
def trigger_event(event=None, **kwargs): ''' Trigger a configured event in IFTTT. :param event: The name of the event to trigger. :return: A dictionary with status, text, and error if result was failure. ''' res = {'result': False, 'message': 'Something went wrong'} data = {} for value in ('value1', 'value2', 'value3', 'Value1', 'Value2', 'Value3'): if value in kwargs: data[value.lower()] = kwargs[value] data['occurredat'] = time.strftime("%B %d, %Y %I:%M%p", time.localtime()) result = _query(event=event, method='POST', data=salt.utils.json.dumps(data) ) if 'status' in result: if result['status'] == 200: res['result'] = True res['message'] = result['text'] else: if 'error' in result: res['message'] = result['error'] return res
Trigger a configured event in IFTTT. :param event: The name of the event to trigger. :return: A dictionary with status, text, and error if result was failure.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ifttt.py#L73-L101
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _query(event=None,\n method='GET',\n args=None,\n header_dict=None,\n data=None):\n '''\n Make a web call to IFTTT.\n '''\n secret_key = __salt__['config.get']('ifttt.secret_key') or \\\n __salt__['config.get']('ifttt:secret_key')\n path = 'https://maker.ifttt.com/trigger/{0}/with/key/{1}'.format(event, secret_key)\n\n if header_dict is None:\n header_dict = {'Content-type': 'application/json'}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n result = salt.utils.http.query(\n path,\n method,\n params={},\n data=data,\n header_dict=header_dict,\n decode=True,\n decode_type='auto',\n text=True,\n status=True,\n cookies=True,\n persist_session=True,\n opts=__opts__,\n backend='requests'\n )\n return result\n" ]
# -*- coding: utf-8 -*- ''' Support for IFTTT .. versionadded:: 2015.8.0 Requires an ``api_key`` in ``/etc/salt/minion``: .. code-block: yaml ifttt: secret_key: '280d4699-a817-4719-ba6f-ca56e573e44f' ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time # Import salt libs import salt.utils.http import salt.utils.json log = logging.getLogger(__name__) def __virtual__(): ''' Only load the module if apache is installed ''' if not __salt__['config.get']('ifttt.secret_key') and \ not __salt__['config.get']('ifttt:secret_key'): return (False, 'IFTTT Secret Key Unavailable, not loading.') return True def _query(event=None, method='GET', args=None, header_dict=None, data=None): ''' Make a web call to IFTTT. ''' secret_key = __salt__['config.get']('ifttt.secret_key') or \ __salt__['config.get']('ifttt:secret_key') path = 'https://maker.ifttt.com/trigger/{0}/with/key/{1}'.format(event, secret_key) if header_dict is None: header_dict = {'Content-type': 'application/json'} if method != 'POST': header_dict['Accept'] = 'application/json' result = salt.utils.http.query( path, method, params={}, data=data, header_dict=header_dict, decode=True, decode_type='auto', text=True, status=True, cookies=True, persist_session=True, opts=__opts__, backend='requests' ) return result
saltstack/salt
salt/proxy/napalm.py
init
python
def init(opts): ''' Opens the connection with the network device. ''' NETWORK_DEVICE.update(salt.utils.napalm.get_device(opts)) DETAILS['initialized'] = True return True
Opens the connection with the network device.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/napalm.py#L202-L208
[ "def get_device(opts, salt_obj=None):\n '''\n Initialise the connection with the network device through NAPALM.\n :param: opts\n :return: the network device object\n '''\n log.debug('Setting up NAPALM connection')\n network_device = get_device_opts(opts, salt_obj=salt_obj)\n provider_lib = napalm_base\n if network_device.get('PROVIDER'):\n # In case the user requires a different provider library,\n # other than napalm-base.\n # For example, if napalm-base does not satisfy the requirements\n # and needs to be enahanced with more specific features,\n # we may need to define a custom library on top of napalm-base\n # with the constraint that it still needs to provide the\n # `get_network_driver` function. However, even this can be\n # extended later, if really needed.\n # Configuration example:\n # provider: napalm_base_example\n try:\n provider_lib = importlib.import_module(network_device.get('PROVIDER'))\n except ImportError as ierr:\n log.error('Unable to import %s',\n network_device.get('PROVIDER'),\n exc_info=True)\n log.error('Falling back to napalm-base')\n _driver_ = provider_lib.get_network_driver(network_device.get('DRIVER_NAME'))\n try:\n network_device['DRIVER'] = _driver_(\n network_device.get('HOSTNAME', ''),\n network_device.get('USERNAME', ''),\n network_device.get('PASSWORD', ''),\n timeout=network_device['TIMEOUT'],\n optional_args=network_device['OPTIONAL_ARGS']\n )\n network_device.get('DRIVER').open()\n # no exception raised here, means connection established\n network_device['UP'] = True\n except napalm_base.exceptions.ConnectionException as error:\n base_err_msg = \"Cannot connect to {hostname}{port} as {username}.\".format(\n hostname=network_device.get('HOSTNAME', '[unspecified hostname]'),\n port=(':{port}'.format(port=network_device.get('OPTIONAL_ARGS', {}).get('port'))\n if network_device.get('OPTIONAL_ARGS', {}).get('port') else ''),\n username=network_device.get('USERNAME', '')\n )\n log.error(base_err_msg)\n log.error(\n \"Please check 
error: %s\", error\n )\n raise napalm_base.exceptions.ConnectionException(base_err_msg)\n return network_device\n" ]
# -*- coding: utf-8 -*- ''' NAPALM: Network Automation and Programmability Abstraction Layer with Multivendor support ========================================================================================= .. versionadded:: 2016.11.0 Proxy minion for managing network devices via NAPALM_ library. :codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com> :maturity: new :depends: napalm :platform: unix Dependencies ------------ The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm`` Please check Installation_ for complete details. .. _NAPALM: https://napalm-automation.net/ .. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html .. note:: Beginning with Salt release 2017.7.3, it is recommended to use ``napalm`` >= ``2.0.0``. The library has been unified into a monolithic package, as in opposite to separate packages per driver. For more details you can check `this document <https://napalm-automation.net/reunification/>`_. While it will still work with the old packages, bear in mind that the NAPALM core team will maintain only the main ``napalm`` package. Moreover, for additional capabilities, the users can always define a library that extends NAPALM's base capabilities and configure the ``provider`` option (see below). Pillar ------ The napalm proxy configuration requires the following parameters in order to connect to the network device: driver Specifies the network device operating system. For a complete list of the supported operating systems please refer to the `NAPALM Read the Docs page`_. host The IP Address or FQDN to use when connecting to the device. Alternatively, the following field names can be used instead: ``hostname``, ``fqdn``, ``ip``. username The username to be used when connecting to the device. passwd The password needed to establish the connection. .. 
note:: This field may not be mandatory when working with SSH-based drivers, and the username has a SSH key properly configured on the device targeted to be managed. optional_args Dictionary with the optional arguments. Check the complete list of supported `optional arguments`_. always_alive: ``True`` In certain less dynamic environments, maintaining the remote connection permanently open with the network device is not always beneficial. In that case, the user can select to initialize the connection only when needed, by specifying this field to ``false``. Default: ``true`` (maintains the connection with the remote network device). .. versionadded:: 2017.7.0 provider: ``napalm_base`` The library that provides the ``get_network_device`` function. This option is useful when the user has more specific needs and requires to extend the NAPALM capabilities using a private library implementation. The only constraint is that the alternative library needs to have the ``get_network_device`` function available. .. versionadded:: 2017.7.1 multiprocessing: ``False`` Overrides the :conf_minion:`multiprocessing` option, per proxy minion. The ``multiprocessing`` option must be turned off for SSH-based proxies. However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. As multiple proxy minions may share the same configuration file, this option permits the configuration of the ``multiprocessing`` option more specifically, for some proxy minions. .. versionadded:: 2017.7.2 .. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems .. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments Proxy pillar file example: .. code-block:: yaml proxy: proxytype: napalm driver: junos host: core05.nrt02 username: my_username passwd: my_password optional_args: port: 12201 Example using a user-specific library, extending NAPALM's capabilities, e.g. ``custom_napalm_base``: .. 
code-block:: yaml proxy: proxytype: napalm driver: ios fqdn: cr1.th2.par.as1234.net username: salt password: '' provider: custom_napalm_base .. seealso:: - :mod:`NAPALM grains: select network devices based on their characteristics <salt.grains.napalm>` - :mod:`NET module: network basic features <salt.modules.napalm_network>` - :mod:`Network config state: Manage the configuration using arbitrary templates <salt.states.netconfig>` - :mod:`NAPALM YANG state: Manage the configuration according to the YANG models (OpenConfig/IETF) <salt.states.netyang>` - :mod:`Network ACL module: Generate and load ACL (firewall) configuration <salt.modules.napalm_acl>` - :mod:`Network ACL state: Manage the firewall configuration <salt.states.netacl>` - :mod:`NTP operational and configuration management module <salt.modules.napalm_ntp>` - :mod:`BGP operational and configuration management module <salt.modules.napalm_bgp>` - :mod:`Routes details <salt.modules.napalm_route>` - :mod:`SNMP configuration module <salt.modules.napalm_snmp>` - :mod:`Users configuration management <salt.modules.napalm_users>` .. note:: Beginning with release codename 2019.2.0, any NAPALM command executed when running under a NAPALM Proxy Minion supports the ``force_reconnect`` magic argument. Proxy Minions generally establish a connection with the remote network device at the time of the Minion startup and that connection is going to be used forever. If one would need execute a command on the device but connecting using different parameters (due to various causes, e.g., unable to authenticate the user specified in the Pillar as the authentication system - say TACACS+ is not available, or the DNS resolver is currently down and would like to temporarily use the IP address instead, etc.), it implies updating the Pillar data and restarting the Proxy Minion process restart. 
In particular cases like that, you can pass the ``force_reconnect=True`` keyword argument, together with the alternative connection details, to enforce the command to be executed over a separate connection. For example, if the usual command is ``salt '*' net.arp``, you can use the following to connect using a different username instead: ``salt '*' net.arp username=my-alt-usr force_reconnect=True``. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python lib import logging log = logging.getLogger(__file__) # Import Salt modules from salt.ext import six import salt.utils.napalm # ---------------------------------------------------------------------------------------------------------------------- # proxy properties # ---------------------------------------------------------------------------------------------------------------------- __proxyenabled__ = ['napalm'] # proxy name # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- NETWORK_DEVICE = {} DETAILS = {} # ---------------------------------------------------------------------------------------------------------------------- # property functions # ---------------------------------------------------------------------------------------------------------------------- def __virtual__(): return salt.utils.napalm.virtual(__opts__, 'napalm', __file__) # ---------------------------------------------------------------------------------------------------------------------- # helper functions -- will not be exported # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # Proxy functions # 
---------------------------------------------------------------------------------------------------------------------- def alive(opts): ''' Return the connection status with the remote device. .. versionadded:: 2017.7.0 ''' if salt.utils.napalm.not_always_alive(opts): return True # don't force reconnection for not-always alive proxies # or regular minion is_alive_ret = call('is_alive', **{}) if not is_alive_ret.get('result', False): log.debug( '[%s] Unable to execute `is_alive`: %s', opts.get('id'), is_alive_ret.get('comment') ) # if `is_alive` is not implemented by the underneath driver, # will consider the connection to be still alive # we don't want overly request connection reestablishment # NOTE: revisit this if IOS is still not stable # and return False to force reconnection return True flag = is_alive_ret.get('out', {}).get('is_alive', False) log.debug('Is %s still alive? %s', opts.get('id'), 'Yes.' if flag else 'No.') return flag def ping(): ''' Connection open successfully? ''' return NETWORK_DEVICE.get('UP', False) def initialized(): ''' Connection finished initializing? ''' return DETAILS.get('initialized', False) def get_device(): ''' Returns the network device object. ''' return NETWORK_DEVICE def get_grains(): ''' Retrieve facts from the network device. ''' return call('get_facts', **{}) def grains_refresh(): ''' Refresh the grains. ''' DETAILS['grains_cache'] = {} return get_grains() def fns(): ''' Method called by NAPALM grains module. ''' return { 'details': 'Network device grains.' } def shutdown(opts): ''' Closes connection with the device. ''' try: if not NETWORK_DEVICE.get('UP', False): raise Exception('not connected!') NETWORK_DEVICE.get('DRIVER').close() except Exception as error: port = NETWORK_DEVICE.get('OPTIONAL_ARGS', {}).get('port') log.error( 'Cannot close connection with %s%s! 
Please check error: %s', NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'), ':{0}'.format(port) if port else '', error ) return True # ---------------------------------------------------------------------------------------------------------------------- # Callable functions # ---------------------------------------------------------------------------------------------------------------------- def call(method, *args, **kwargs): ''' Calls a specific method from the network driver instance. Please check the readthedocs_ page for the updated list of getters. .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix :param method: specifies the name of the method to be called :param params: contains the mapping between the name and the values of the parameters needed to call the method :return: A dictionary with three keys: - result (True/False): if the operation succeeded - out (object): returns the object as-is from the call - comment (string): provides more details in case the call failed - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_ .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _FAQ: https://github.com/napalm-automation/napalm#faq Example: .. code-block:: python __proxy__['napalm.call']('cli' **{ 'commands': [ 'show version', 'show chassis fan' ] }) ''' kwargs_copy = {} kwargs_copy.update(kwargs) for karg, warg in six.iteritems(kwargs_copy): # will remove None values # thus the NAPALM methods will be called with their defaults if warg is None: kwargs.pop(karg) return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
saltstack/salt
salt/proxy/napalm.py
alive
python
def alive(opts): ''' Return the connection status with the remote device. .. versionadded:: 2017.7.0 ''' if salt.utils.napalm.not_always_alive(opts): return True # don't force reconnection for not-always alive proxies # or regular minion is_alive_ret = call('is_alive', **{}) if not is_alive_ret.get('result', False): log.debug( '[%s] Unable to execute `is_alive`: %s', opts.get('id'), is_alive_ret.get('comment') ) # if `is_alive` is not implemented by the underneath driver, # will consider the connection to be still alive # we don't want overly request connection reestablishment # NOTE: revisit this if IOS is still not stable # and return False to force reconnection return True flag = is_alive_ret.get('out', {}).get('is_alive', False) log.debug('Is %s still alive? %s', opts.get('id'), 'Yes.' if flag else 'No.') return flag
Return the connection status with the remote device. .. versionadded:: 2017.7.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/napalm.py#L211-L234
[ "def not_always_alive(opts):\n '''\n Should this proxy be always alive?\n '''\n return (is_proxy(opts) and not is_always_alive(opts)) or is_minion(opts)\n" ]
# -*- coding: utf-8 -*- ''' NAPALM: Network Automation and Programmability Abstraction Layer with Multivendor support ========================================================================================= .. versionadded:: 2016.11.0 Proxy minion for managing network devices via NAPALM_ library. :codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com> :maturity: new :depends: napalm :platform: unix Dependencies ------------ The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm`` Please check Installation_ for complete details. .. _NAPALM: https://napalm-automation.net/ .. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html .. note:: Beginning with Salt release 2017.7.3, it is recommended to use ``napalm`` >= ``2.0.0``. The library has been unified into a monolithic package, as in opposite to separate packages per driver. For more details you can check `this document <https://napalm-automation.net/reunification/>`_. While it will still work with the old packages, bear in mind that the NAPALM core team will maintain only the main ``napalm`` package. Moreover, for additional capabilities, the users can always define a library that extends NAPALM's base capabilities and configure the ``provider`` option (see below). Pillar ------ The napalm proxy configuration requires the following parameters in order to connect to the network device: driver Specifies the network device operating system. For a complete list of the supported operating systems please refer to the `NAPALM Read the Docs page`_. host The IP Address or FQDN to use when connecting to the device. Alternatively, the following field names can be used instead: ``hostname``, ``fqdn``, ``ip``. username The username to be used when connecting to the device. passwd The password needed to establish the connection. .. 
note:: This field may not be mandatory when working with SSH-based drivers, and the username has a SSH key properly configured on the device targeted to be managed. optional_args Dictionary with the optional arguments. Check the complete list of supported `optional arguments`_. always_alive: ``True`` In certain less dynamic environments, maintaining the remote connection permanently open with the network device is not always beneficial. In that case, the user can select to initialize the connection only when needed, by specifying this field to ``false``. Default: ``true`` (maintains the connection with the remote network device). .. versionadded:: 2017.7.0 provider: ``napalm_base`` The library that provides the ``get_network_device`` function. This option is useful when the user has more specific needs and requires to extend the NAPALM capabilities using a private library implementation. The only constraint is that the alternative library needs to have the ``get_network_device`` function available. .. versionadded:: 2017.7.1 multiprocessing: ``False`` Overrides the :conf_minion:`multiprocessing` option, per proxy minion. The ``multiprocessing`` option must be turned off for SSH-based proxies. However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. As multiple proxy minions may share the same configuration file, this option permits the configuration of the ``multiprocessing`` option more specifically, for some proxy minions. .. versionadded:: 2017.7.2 .. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems .. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments Proxy pillar file example: .. code-block:: yaml proxy: proxytype: napalm driver: junos host: core05.nrt02 username: my_username passwd: my_password optional_args: port: 12201 Example using a user-specific library, extending NAPALM's capabilities, e.g. ``custom_napalm_base``: .. 
code-block:: yaml proxy: proxytype: napalm driver: ios fqdn: cr1.th2.par.as1234.net username: salt password: '' provider: custom_napalm_base .. seealso:: - :mod:`NAPALM grains: select network devices based on their characteristics <salt.grains.napalm>` - :mod:`NET module: network basic features <salt.modules.napalm_network>` - :mod:`Network config state: Manage the configuration using arbitrary templates <salt.states.netconfig>` - :mod:`NAPALM YANG state: Manage the configuration according to the YANG models (OpenConfig/IETF) <salt.states.netyang>` - :mod:`Network ACL module: Generate and load ACL (firewall) configuration <salt.modules.napalm_acl>` - :mod:`Network ACL state: Manage the firewall configuration <salt.states.netacl>` - :mod:`NTP operational and configuration management module <salt.modules.napalm_ntp>` - :mod:`BGP operational and configuration management module <salt.modules.napalm_bgp>` - :mod:`Routes details <salt.modules.napalm_route>` - :mod:`SNMP configuration module <salt.modules.napalm_snmp>` - :mod:`Users configuration management <salt.modules.napalm_users>` .. note:: Beginning with release codename 2019.2.0, any NAPALM command executed when running under a NAPALM Proxy Minion supports the ``force_reconnect`` magic argument. Proxy Minions generally establish a connection with the remote network device at the time of the Minion startup and that connection is going to be used forever. If one would need execute a command on the device but connecting using different parameters (due to various causes, e.g., unable to authenticate the user specified in the Pillar as the authentication system - say TACACS+ is not available, or the DNS resolver is currently down and would like to temporarily use the IP address instead, etc.), it implies updating the Pillar data and restarting the Proxy Minion process restart. 
In particular cases like that, you can pass the ``force_reconnect=True`` keyword argument, together with the alternative connection details, to enforce the command to be executed over a separate connection. For example, if the usual command is ``salt '*' net.arp``, you can use the following to connect using a different username instead: ``salt '*' net.arp username=my-alt-usr force_reconnect=True``. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python lib import logging log = logging.getLogger(__file__) # Import Salt modules from salt.ext import six import salt.utils.napalm # ---------------------------------------------------------------------------------------------------------------------- # proxy properties # ---------------------------------------------------------------------------------------------------------------------- __proxyenabled__ = ['napalm'] # proxy name # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- NETWORK_DEVICE = {} DETAILS = {} # ---------------------------------------------------------------------------------------------------------------------- # property functions # ---------------------------------------------------------------------------------------------------------------------- def __virtual__(): return salt.utils.napalm.virtual(__opts__, 'napalm', __file__) # ---------------------------------------------------------------------------------------------------------------------- # helper functions -- will not be exported # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # Proxy functions # 
---------------------------------------------------------------------------------------------------------------------- def init(opts): ''' Opens the connection with the network device. ''' NETWORK_DEVICE.update(salt.utils.napalm.get_device(opts)) DETAILS['initialized'] = True return True def ping(): ''' Connection open successfully? ''' return NETWORK_DEVICE.get('UP', False) def initialized(): ''' Connection finished initializing? ''' return DETAILS.get('initialized', False) def get_device(): ''' Returns the network device object. ''' return NETWORK_DEVICE def get_grains(): ''' Retrieve facts from the network device. ''' return call('get_facts', **{}) def grains_refresh(): ''' Refresh the grains. ''' DETAILS['grains_cache'] = {} return get_grains() def fns(): ''' Method called by NAPALM grains module. ''' return { 'details': 'Network device grains.' } def shutdown(opts): ''' Closes connection with the device. ''' try: if not NETWORK_DEVICE.get('UP', False): raise Exception('not connected!') NETWORK_DEVICE.get('DRIVER').close() except Exception as error: port = NETWORK_DEVICE.get('OPTIONAL_ARGS', {}).get('port') log.error( 'Cannot close connection with %s%s! Please check error: %s', NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'), ':{0}'.format(port) if port else '', error ) return True # ---------------------------------------------------------------------------------------------------------------------- # Callable functions # ---------------------------------------------------------------------------------------------------------------------- def call(method, *args, **kwargs): ''' Calls a specific method from the network driver instance. Please check the readthedocs_ page for the updated list of getters. .. 
_readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix :param method: specifies the name of the method to be called :param params: contains the mapping between the name and the values of the parameters needed to call the method :return: A dictionary with three keys: - result (True/False): if the operation succeeded - out (object): returns the object as-is from the call - comment (string): provides more details in case the call failed - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_ .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _FAQ: https://github.com/napalm-automation/napalm#faq Example: .. code-block:: python __proxy__['napalm.call']('cli' **{ 'commands': [ 'show version', 'show chassis fan' ] }) ''' kwargs_copy = {} kwargs_copy.update(kwargs) for karg, warg in six.iteritems(kwargs_copy): # will remove None values # thus the NAPALM methods will be called with their defaults if warg is None: kwargs.pop(karg) return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
saltstack/salt
salt/proxy/napalm.py
shutdown
python
def shutdown(opts): ''' Closes connection with the device. ''' try: if not NETWORK_DEVICE.get('UP', False): raise Exception('not connected!') NETWORK_DEVICE.get('DRIVER').close() except Exception as error: port = NETWORK_DEVICE.get('OPTIONAL_ARGS', {}).get('port') log.error( 'Cannot close connection with %s%s! Please check error: %s', NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'), ':{0}'.format(port) if port else '', error ) return True
Closes connection with the device.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/napalm.py#L282-L299
null
# -*- coding: utf-8 -*- ''' NAPALM: Network Automation and Programmability Abstraction Layer with Multivendor support ========================================================================================= .. versionadded:: 2016.11.0 Proxy minion for managing network devices via NAPALM_ library. :codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com> :maturity: new :depends: napalm :platform: unix Dependencies ------------ The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm`` Please check Installation_ for complete details. .. _NAPALM: https://napalm-automation.net/ .. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html .. note:: Beginning with Salt release 2017.7.3, it is recommended to use ``napalm`` >= ``2.0.0``. The library has been unified into a monolithic package, as in opposite to separate packages per driver. For more details you can check `this document <https://napalm-automation.net/reunification/>`_. While it will still work with the old packages, bear in mind that the NAPALM core team will maintain only the main ``napalm`` package. Moreover, for additional capabilities, the users can always define a library that extends NAPALM's base capabilities and configure the ``provider`` option (see below). Pillar ------ The napalm proxy configuration requires the following parameters in order to connect to the network device: driver Specifies the network device operating system. For a complete list of the supported operating systems please refer to the `NAPALM Read the Docs page`_. host The IP Address or FQDN to use when connecting to the device. Alternatively, the following field names can be used instead: ``hostname``, ``fqdn``, ``ip``. username The username to be used when connecting to the device. passwd The password needed to establish the connection. .. 
note:: This field may not be mandatory when working with SSH-based drivers, and the username has a SSH key properly configured on the device targeted to be managed. optional_args Dictionary with the optional arguments. Check the complete list of supported `optional arguments`_. always_alive: ``True`` In certain less dynamic environments, maintaining the remote connection permanently open with the network device is not always beneficial. In that case, the user can select to initialize the connection only when needed, by specifying this field to ``false``. Default: ``true`` (maintains the connection with the remote network device). .. versionadded:: 2017.7.0 provider: ``napalm_base`` The library that provides the ``get_network_device`` function. This option is useful when the user has more specific needs and requires to extend the NAPALM capabilities using a private library implementation. The only constraint is that the alternative library needs to have the ``get_network_device`` function available. .. versionadded:: 2017.7.1 multiprocessing: ``False`` Overrides the :conf_minion:`multiprocessing` option, per proxy minion. The ``multiprocessing`` option must be turned off for SSH-based proxies. However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. As multiple proxy minions may share the same configuration file, this option permits the configuration of the ``multiprocessing`` option more specifically, for some proxy minions. .. versionadded:: 2017.7.2 .. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems .. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments Proxy pillar file example: .. code-block:: yaml proxy: proxytype: napalm driver: junos host: core05.nrt02 username: my_username passwd: my_password optional_args: port: 12201 Example using a user-specific library, extending NAPALM's capabilities, e.g. ``custom_napalm_base``: .. 
code-block:: yaml proxy: proxytype: napalm driver: ios fqdn: cr1.th2.par.as1234.net username: salt password: '' provider: custom_napalm_base .. seealso:: - :mod:`NAPALM grains: select network devices based on their characteristics <salt.grains.napalm>` - :mod:`NET module: network basic features <salt.modules.napalm_network>` - :mod:`Network config state: Manage the configuration using arbitrary templates <salt.states.netconfig>` - :mod:`NAPALM YANG state: Manage the configuration according to the YANG models (OpenConfig/IETF) <salt.states.netyang>` - :mod:`Network ACL module: Generate and load ACL (firewall) configuration <salt.modules.napalm_acl>` - :mod:`Network ACL state: Manage the firewall configuration <salt.states.netacl>` - :mod:`NTP operational and configuration management module <salt.modules.napalm_ntp>` - :mod:`BGP operational and configuration management module <salt.modules.napalm_bgp>` - :mod:`Routes details <salt.modules.napalm_route>` - :mod:`SNMP configuration module <salt.modules.napalm_snmp>` - :mod:`Users configuration management <salt.modules.napalm_users>` .. note:: Beginning with release codename 2019.2.0, any NAPALM command executed when running under a NAPALM Proxy Minion supports the ``force_reconnect`` magic argument. Proxy Minions generally establish a connection with the remote network device at the time of the Minion startup and that connection is going to be used forever. If one would need execute a command on the device but connecting using different parameters (due to various causes, e.g., unable to authenticate the user specified in the Pillar as the authentication system - say TACACS+ is not available, or the DNS resolver is currently down and would like to temporarily use the IP address instead, etc.), it implies updating the Pillar data and restarting the Proxy Minion process restart. 
In particular cases like that, you can pass the ``force_reconnect=True`` keyword argument, together with the alternative connection details, to enforce the command to be executed over a separate connection. For example, if the usual command is ``salt '*' net.arp``, you can use the following to connect using a different username instead: ``salt '*' net.arp username=my-alt-usr force_reconnect=True``. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python lib import logging log = logging.getLogger(__file__) # Import Salt modules from salt.ext import six import salt.utils.napalm # ---------------------------------------------------------------------------------------------------------------------- # proxy properties # ---------------------------------------------------------------------------------------------------------------------- __proxyenabled__ = ['napalm'] # proxy name # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- NETWORK_DEVICE = {} DETAILS = {} # ---------------------------------------------------------------------------------------------------------------------- # property functions # ---------------------------------------------------------------------------------------------------------------------- def __virtual__(): return salt.utils.napalm.virtual(__opts__, 'napalm', __file__) # ---------------------------------------------------------------------------------------------------------------------- # helper functions -- will not be exported # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # Proxy functions # 
---------------------------------------------------------------------------------------------------------------------- def init(opts): ''' Opens the connection with the network device. ''' NETWORK_DEVICE.update(salt.utils.napalm.get_device(opts)) DETAILS['initialized'] = True return True def alive(opts): ''' Return the connection status with the remote device. .. versionadded:: 2017.7.0 ''' if salt.utils.napalm.not_always_alive(opts): return True # don't force reconnection for not-always alive proxies # or regular minion is_alive_ret = call('is_alive', **{}) if not is_alive_ret.get('result', False): log.debug( '[%s] Unable to execute `is_alive`: %s', opts.get('id'), is_alive_ret.get('comment') ) # if `is_alive` is not implemented by the underneath driver, # will consider the connection to be still alive # we don't want overly request connection reestablishment # NOTE: revisit this if IOS is still not stable # and return False to force reconnection return True flag = is_alive_ret.get('out', {}).get('is_alive', False) log.debug('Is %s still alive? %s', opts.get('id'), 'Yes.' if flag else 'No.') return flag def ping(): ''' Connection open successfully? ''' return NETWORK_DEVICE.get('UP', False) def initialized(): ''' Connection finished initializing? ''' return DETAILS.get('initialized', False) def get_device(): ''' Returns the network device object. ''' return NETWORK_DEVICE def get_grains(): ''' Retrieve facts from the network device. ''' return call('get_facts', **{}) def grains_refresh(): ''' Refresh the grains. ''' DETAILS['grains_cache'] = {} return get_grains() def fns(): ''' Method called by NAPALM grains module. ''' return { 'details': 'Network device grains.' 
} # ---------------------------------------------------------------------------------------------------------------------- # Callable functions # ---------------------------------------------------------------------------------------------------------------------- def call(method, *args, **kwargs): ''' Calls a specific method from the network driver instance. Please check the readthedocs_ page for the updated list of getters. .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix :param method: specifies the name of the method to be called :param params: contains the mapping between the name and the values of the parameters needed to call the method :return: A dictionary with three keys: - result (True/False): if the operation succeeded - out (object): returns the object as-is from the call - comment (string): provides more details in case the call failed - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_ .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _FAQ: https://github.com/napalm-automation/napalm#faq Example: .. code-block:: python __proxy__['napalm.call']('cli' **{ 'commands': [ 'show version', 'show chassis fan' ] }) ''' kwargs_copy = {} kwargs_copy.update(kwargs) for karg, warg in six.iteritems(kwargs_copy): # will remove None values # thus the NAPALM methods will be called with their defaults if warg is None: kwargs.pop(karg) return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
saltstack/salt
salt/proxy/napalm.py
call
python
def call(method, *args, **kwargs): ''' Calls a specific method from the network driver instance. Please check the readthedocs_ page for the updated list of getters. .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix :param method: specifies the name of the method to be called :param params: contains the mapping between the name and the values of the parameters needed to call the method :return: A dictionary with three keys: - result (True/False): if the operation succeeded - out (object): returns the object as-is from the call - comment (string): provides more details in case the call failed - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_ .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _FAQ: https://github.com/napalm-automation/napalm#faq Example: .. code-block:: python __proxy__['napalm.call']('cli' **{ 'commands': [ 'show version', 'show chassis fan' ] }) ''' kwargs_copy = {} kwargs_copy.update(kwargs) for karg, warg in six.iteritems(kwargs_copy): # will remove None values # thus the NAPALM methods will be called with their defaults if warg is None: kwargs.pop(karg) return salt.utils.napalm.call(NETWORK_DEVICE, method, *args, **kwargs)
Calls a specific method from the network driver instance. Please check the readthedocs_ page for the updated list of getters. .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix :param method: specifies the name of the method to be called :param params: contains the mapping between the name and the values of the parameters needed to call the method :return: A dictionary with three keys: - result (True/False): if the operation succeeded - out (object): returns the object as-is from the call - comment (string): provides more details in case the call failed - traceback (string): complete traceback in case of exception. Please submit an issue including this traceback on the `correct driver repo`_ and make sure to read the FAQ_ .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new .. _FAQ: https://github.com/napalm-automation/napalm#faq Example: .. code-block:: python __proxy__['napalm.call']('cli' **{ 'commands': [ 'show version', 'show chassis fan' ] })
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/napalm.py#L306-L346
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def call(napalm_device, method, *args, **kwargs):\n '''\n Calls arbitrary methods from the network driver instance.\n Please check the readthedocs_ page for the updated list of getters.\n\n .. _readthedocs: http://napalm.readthedocs.org/en/latest/support/index.html#getters-support-matrix\n\n method\n Specifies the name of the method to be called.\n\n *args\n Arguments.\n\n **kwargs\n More arguments.\n\n :return: A dictionary with three keys:\n\n * result (True/False): if the operation succeeded\n * out (object): returns the object as-is from the call\n * comment (string): provides more details in case the call failed\n * traceback (string): complete traceback in case of exception. \\\n Please submit an issue including this traceback \\\n on the `correct driver repo`_ and make sure to read the FAQ_\n\n .. _`correct driver repo`: https://github.com/napalm-automation/napalm/issues/new\n .. FAQ_: https://github.com/napalm-automation/napalm#faq\n\n Example:\n\n .. 
code-block:: python\n\n salt.utils.napalm.call(\n napalm_object,\n 'cli',\n [\n 'show version',\n 'show chassis fan'\n ]\n )\n '''\n result = False\n out = None\n opts = napalm_device.get('__opts__', {})\n retry = kwargs.pop('__retry', True) # retry executing the task?\n force_reconnect = kwargs.get('force_reconnect', False)\n if force_reconnect:\n log.debug('Forced reconnection initiated')\n log.debug('The current opts (under the proxy key):')\n log.debug(opts['proxy'])\n opts['proxy'].update(**kwargs)\n log.debug('Updated to:')\n log.debug(opts['proxy'])\n napalm_device = get_device(opts)\n try:\n if not napalm_device.get('UP', False):\n raise Exception('not connected')\n # if connected will try to execute desired command\n kwargs_copy = {}\n kwargs_copy.update(kwargs)\n for karg, warg in six.iteritems(kwargs_copy):\n # lets clear None arguments\n # to not be sent to NAPALM methods\n if warg is None:\n kwargs.pop(karg)\n out = getattr(napalm_device.get('DRIVER'), method)(*args, **kwargs)\n # calls the method with the specified parameters\n result = True\n except Exception as error:\n # either not connected\n # either unable to execute the command\n hostname = napalm_device.get('HOSTNAME', '[unspecified hostname]')\n err_tb = traceback.format_exc() # let's get the full traceback and display for debugging reasons.\n if isinstance(error, NotImplementedError):\n comment = '{method} is not implemented for the NAPALM {driver} driver!'.format(\n method=method,\n driver=napalm_device.get('DRIVER_NAME')\n )\n elif retry and HAS_CONN_CLOSED_EXC_CLASS and isinstance(error, ConnectionClosedException):\n # Received disconection whilst executing the operation.\n # Instructed to retry (default behaviour)\n # thus trying to re-establish the connection\n # and re-execute the command\n # if any of the operations (close, open, call) will rise again ConnectionClosedException\n # it will fail loudly.\n kwargs['__retry'] = False # do not attempt re-executing\n comment = 'Disconnected 
from {device}. Trying to reconnect.'.format(device=hostname)\n log.error(err_tb)\n log.error(comment)\n log.debug('Clearing the connection with %s', hostname)\n call(napalm_device, 'close', __retry=False) # safely close the connection\n # Make sure we don't leave any TCP connection open behind\n # if we fail to close properly, we might not be able to access the\n log.debug('Re-opening the connection with %s', hostname)\n call(napalm_device, 'open', __retry=False)\n log.debug('Connection re-opened with %s', hostname)\n log.debug('Re-executing %s', method)\n return call(napalm_device, method, *args, **kwargs)\n # If still not able to reconnect and execute the task,\n # the proxy keepalive feature (if enabled) will attempt\n # to reconnect.\n # If the device is using a SSH-based connection, the failure\n # will also notify the paramiko transport and the `is_alive` flag\n # is going to be set correctly.\n # More background: the network device may decide to disconnect,\n # although the SSH session itself is alive and usable, the reason\n # being the lack of activity on the CLI.\n # Paramiko's keepalive doesn't help in this case, as the ServerAliveInterval\n # are targeting the transport layer, whilst the device takes the decision\n # when there isn't any activity on the CLI, thus at the application layer.\n # Moreover, the disconnect is silent and paramiko's is_alive flag will\n # continue to return True, although the connection is already unusable.\n # For more info, see https://github.com/paramiko/paramiko/issues/813.\n # But after a command fails, the `is_alive` flag becomes aware of these\n # changes and will return False from there on. And this is how the\n # Salt proxy keepalive helps: immediately after the first failure, it\n # will know the state of the connection and will try reconnecting.\n else:\n comment = 'Cannot execute \"{method}\" on {device}{port} as {user}. 
Reason: {error}!'.format(\n device=napalm_device.get('HOSTNAME', '[unspecified hostname]'),\n port=(':{port}'.format(port=napalm_device.get('OPTIONAL_ARGS', {}).get('port'))\n if napalm_device.get('OPTIONAL_ARGS', {}).get('port') else ''),\n user=napalm_device.get('USERNAME', ''),\n method=method,\n error=error\n )\n log.error(comment)\n log.error(err_tb)\n return {\n 'out': {},\n 'result': False,\n 'comment': comment,\n 'traceback': err_tb\n }\n finally:\n if opts and not_always_alive(opts) and napalm_device.get('CLOSE', True):\n # either running in a not-always-alive proxy\n # either running in a regular minion\n # close the connection when the call is over\n # unless the CLOSE is explicitly set as False\n napalm_device['DRIVER'].close()\n return {\n 'out': out,\n 'result': result,\n 'comment': ''\n }\n" ]
# -*- coding: utf-8 -*- ''' NAPALM: Network Automation and Programmability Abstraction Layer with Multivendor support ========================================================================================= .. versionadded:: 2016.11.0 Proxy minion for managing network devices via NAPALM_ library. :codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com> :maturity: new :depends: napalm :platform: unix Dependencies ------------ The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm`` Please check Installation_ for complete details. .. _NAPALM: https://napalm-automation.net/ .. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html .. note:: Beginning with Salt release 2017.7.3, it is recommended to use ``napalm`` >= ``2.0.0``. The library has been unified into a monolithic package, as in opposite to separate packages per driver. For more details you can check `this document <https://napalm-automation.net/reunification/>`_. While it will still work with the old packages, bear in mind that the NAPALM core team will maintain only the main ``napalm`` package. Moreover, for additional capabilities, the users can always define a library that extends NAPALM's base capabilities and configure the ``provider`` option (see below). Pillar ------ The napalm proxy configuration requires the following parameters in order to connect to the network device: driver Specifies the network device operating system. For a complete list of the supported operating systems please refer to the `NAPALM Read the Docs page`_. host The IP Address or FQDN to use when connecting to the device. Alternatively, the following field names can be used instead: ``hostname``, ``fqdn``, ``ip``. username The username to be used when connecting to the device. passwd The password needed to establish the connection. .. 
note:: This field may not be mandatory when working with SSH-based drivers, and the username has a SSH key properly configured on the device targeted to be managed. optional_args Dictionary with the optional arguments. Check the complete list of supported `optional arguments`_. always_alive: ``True`` In certain less dynamic environments, maintaining the remote connection permanently open with the network device is not always beneficial. In that case, the user can select to initialize the connection only when needed, by specifying this field to ``false``. Default: ``true`` (maintains the connection with the remote network device). .. versionadded:: 2017.7.0 provider: ``napalm_base`` The library that provides the ``get_network_device`` function. This option is useful when the user has more specific needs and requires to extend the NAPALM capabilities using a private library implementation. The only constraint is that the alternative library needs to have the ``get_network_device`` function available. .. versionadded:: 2017.7.1 multiprocessing: ``False`` Overrides the :conf_minion:`multiprocessing` option, per proxy minion. The ``multiprocessing`` option must be turned off for SSH-based proxies. However, some NAPALM drivers (e.g. Arista, NX-OS) are not SSH-based. As multiple proxy minions may share the same configuration file, this option permits the configuration of the ``multiprocessing`` option more specifically, for some proxy minions. .. versionadded:: 2017.7.2 .. _`NAPALM Read the Docs page`: https://napalm.readthedocs.io/en/latest/#supported-network-operating-systems .. _`optional arguments`: http://napalm.readthedocs.io/en/latest/support/index.html#list-of-supported-optional-arguments Proxy pillar file example: .. code-block:: yaml proxy: proxytype: napalm driver: junos host: core05.nrt02 username: my_username passwd: my_password optional_args: port: 12201 Example using a user-specific library, extending NAPALM's capabilities, e.g. ``custom_napalm_base``: .. 
code-block:: yaml proxy: proxytype: napalm driver: ios fqdn: cr1.th2.par.as1234.net username: salt password: '' provider: custom_napalm_base .. seealso:: - :mod:`NAPALM grains: select network devices based on their characteristics <salt.grains.napalm>` - :mod:`NET module: network basic features <salt.modules.napalm_network>` - :mod:`Network config state: Manage the configuration using arbitrary templates <salt.states.netconfig>` - :mod:`NAPALM YANG state: Manage the configuration according to the YANG models (OpenConfig/IETF) <salt.states.netyang>` - :mod:`Network ACL module: Generate and load ACL (firewall) configuration <salt.modules.napalm_acl>` - :mod:`Network ACL state: Manage the firewall configuration <salt.states.netacl>` - :mod:`NTP operational and configuration management module <salt.modules.napalm_ntp>` - :mod:`BGP operational and configuration management module <salt.modules.napalm_bgp>` - :mod:`Routes details <salt.modules.napalm_route>` - :mod:`SNMP configuration module <salt.modules.napalm_snmp>` - :mod:`Users configuration management <salt.modules.napalm_users>` .. note:: Beginning with release codename 2019.2.0, any NAPALM command executed when running under a NAPALM Proxy Minion supports the ``force_reconnect`` magic argument. Proxy Minions generally establish a connection with the remote network device at the time of the Minion startup and that connection is going to be used forever. If one would need execute a command on the device but connecting using different parameters (due to various causes, e.g., unable to authenticate the user specified in the Pillar as the authentication system - say TACACS+ is not available, or the DNS resolver is currently down and would like to temporarily use the IP address instead, etc.), it implies updating the Pillar data and restarting the Proxy Minion process restart. 
In particular cases like that, you can pass the ``force_reconnect=True`` keyword argument, together with the alternative connection details, to enforce the command to be executed over a separate connection. For example, if the usual command is ``salt '*' net.arp``, you can use the following to connect using a different username instead: ``salt '*' net.arp username=my-alt-usr force_reconnect=True``. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python lib import logging log = logging.getLogger(__file__) # Import Salt modules from salt.ext import six import salt.utils.napalm # ---------------------------------------------------------------------------------------------------------------------- # proxy properties # ---------------------------------------------------------------------------------------------------------------------- __proxyenabled__ = ['napalm'] # proxy name # ---------------------------------------------------------------------------------------------------------------------- # global variables # ---------------------------------------------------------------------------------------------------------------------- NETWORK_DEVICE = {} DETAILS = {} # ---------------------------------------------------------------------------------------------------------------------- # property functions # ---------------------------------------------------------------------------------------------------------------------- def __virtual__(): return salt.utils.napalm.virtual(__opts__, 'napalm', __file__) # ---------------------------------------------------------------------------------------------------------------------- # helper functions -- will not be exported # ---------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # Proxy functions # 
---------------------------------------------------------------------------------------------------------------------- def init(opts): ''' Opens the connection with the network device. ''' NETWORK_DEVICE.update(salt.utils.napalm.get_device(opts)) DETAILS['initialized'] = True return True def alive(opts): ''' Return the connection status with the remote device. .. versionadded:: 2017.7.0 ''' if salt.utils.napalm.not_always_alive(opts): return True # don't force reconnection for not-always alive proxies # or regular minion is_alive_ret = call('is_alive', **{}) if not is_alive_ret.get('result', False): log.debug( '[%s] Unable to execute `is_alive`: %s', opts.get('id'), is_alive_ret.get('comment') ) # if `is_alive` is not implemented by the underneath driver, # will consider the connection to be still alive # we don't want overly request connection reestablishment # NOTE: revisit this if IOS is still not stable # and return False to force reconnection return True flag = is_alive_ret.get('out', {}).get('is_alive', False) log.debug('Is %s still alive? %s', opts.get('id'), 'Yes.' if flag else 'No.') return flag def ping(): ''' Connection open successfully? ''' return NETWORK_DEVICE.get('UP', False) def initialized(): ''' Connection finished initializing? ''' return DETAILS.get('initialized', False) def get_device(): ''' Returns the network device object. ''' return NETWORK_DEVICE def get_grains(): ''' Retrieve facts from the network device. ''' return call('get_facts', **{}) def grains_refresh(): ''' Refresh the grains. ''' DETAILS['grains_cache'] = {} return get_grains() def fns(): ''' Method called by NAPALM grains module. ''' return { 'details': 'Network device grains.' } def shutdown(opts): ''' Closes connection with the device. 
''' try: if not NETWORK_DEVICE.get('UP', False): raise Exception('not connected!') NETWORK_DEVICE.get('DRIVER').close() except Exception as error: port = NETWORK_DEVICE.get('OPTIONAL_ARGS', {}).get('port') log.error( 'Cannot close connection with %s%s! Please check error: %s', NETWORK_DEVICE.get('HOSTNAME', '[unknown hostname]'), ':{0}'.format(port) if port else '', error ) return True # ---------------------------------------------------------------------------------------------------------------------- # Callable functions # ----------------------------------------------------------------------------------------------------------------------
saltstack/salt
salt/modules/win_license.py
info
python
def info(): ''' Return information about the license, if the license is not correctly activated this will return None. CLI Example: .. code-block:: bash salt '*' license.info ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /dli' out = __salt__['cmd.run'](cmd) match = re.search(r'Name: (.*)\r\nDescription: (.*)\r\nPartial Product Key: (.*)\r\nLicense Status: (.*)', out, re.MULTILINE) if match is not None: groups = match.groups() return { 'name': groups[0], 'description': groups[1], 'partial_key': groups[2], 'licensed': 'Licensed' in groups[3] } return None
Return information about the license, if the license is not correctly activated this will return None. CLI Example: .. code-block:: bash salt '*' license.info
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_license.py#L106-L132
null
# -*- coding: utf-8 -*- ''' This module allows you to manage windows licensing via slmgr.vbs .. code-block:: bash salt '*' license.install XXXXX-XXXXX-XXXXX-XXXXX-XXXXX ''' # Import Python Libs from __future__ import absolute_import, unicode_literals, print_function import re import logging # Import Salt Libs import salt.utils.platform log = logging.getLogger(__name__) __virtualname__ = 'license' def __virtual__(): ''' Only work on Windows ''' if salt.utils.platform.is_windows(): return __virtualname__ return False def installed(product_key): ''' Check to see if the product key is already installed. Note: This is not 100% accurate as we can only see the last 5 digits of the license. CLI Example: .. code-block:: bash salt '*' license.installed XXXXX-XXXXX-XXXXX-XXXXX-XXXXX ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /dli' out = __salt__['cmd.run'](cmd) return product_key[-5:] in out def install(product_key): ''' Install the given product key CLI Example: .. code-block:: bash salt '*' license.install XXXXX-XXXXX-XXXXX-XXXXX-XXXXX ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /ipk {0}'.format(product_key) return __salt__['cmd.run'](cmd) def uninstall(): ''' Uninstall the current product key CLI Example: .. code-block:: bash salt '*' license.uninstall ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /upk' return __salt__['cmd.run'](cmd) def activate(): ''' Attempt to activate the current machine via Windows Activation CLI Example: .. code-block:: bash salt '*' license.activate ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /ato' return __salt__['cmd.run'](cmd) def licensed(): ''' Return true if the current machine is licensed correctly CLI Example: .. code-block:: bash salt '*' license.licensed ''' cmd = r'cscript C:\Windows\System32\slmgr.vbs /dli' out = __salt__['cmd.run'](cmd) return 'License Status: Licensed' in out
saltstack/salt
salt/states/boto_apigateway.py
present
python
def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. 
lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. 
if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. 
versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. 
consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. 
stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L79-L339
[ "def _get_stage_variables(stage_variables):\n '''\n Helper function to retrieve stage variables from pillars/options, if the\n input is a string\n '''\n ret = dict()\n if stage_variables is None:\n return ret\n\n if isinstance(stage_variables, six.string_types):\n if stage_variables in __opts__:\n ret = __opts__[stage_variables]\n master_opts = __pillar__.get('master', {})\n if stage_variables in master_opts:\n ret = master_opts[stage_variables]\n if stage_variables in __pillar__:\n ret = __pillar__[stage_variables]\n elif isinstance(stage_variables, dict):\n ret = stage_variables\n\n if not isinstance(ret, dict):\n ret = dict()\n\n return ret\n", "def verify_api(self, ret):\n '''\n this method helps determine if the given stage_name is already on a deployment\n label matching the input api_name, swagger_file.\n\n If yes, returns abort with comment indicating already at desired state.\n If not and there is previous deployment labels in AWS matching the given input api_name and\n swagger file, indicate to the caller that we only need to reassociate stage_name to the\n previously existing deployment label.\n '''\n\n if self.restApiId:\n deployed_label_json = self._get_current_deployment_label()\n if deployed_label_json == self.deployment_label_json:\n ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '\n 'deployment label:\\n{1}'.format(self._stage_name, deployed_label_json))\n ret['current'] = True\n return ret\n else:\n self._deploymentId = self._get_desired_deployment_id()\n if self._deploymentId:\n ret['publish'] = True\n return ret\n", "def publish_api(self, ret, stage_variables):\n '''\n this method tie the given stage_name to a deployment matching the given swagger_file\n '''\n stage_desc = dict()\n stage_desc['current_deployment_label'] = self.deployment_label\n stage_desc_json = _dict_to_json_pretty(stage_desc)\n\n if self._deploymentId:\n # just do a reassociate of stage_name to an already existing deployment\n res = 
self._set_current_deployment(stage_desc_json, stage_variables)\n if not res.get('set'):\n ret['abort'] = True\n ret['result'] = False\n ret['comment'] = res.get('error')\n else:\n ret = _log_changes(ret,\n 'publish_api (reassociate deployment, set stage_variables)',\n res.get('response'))\n else:\n # no deployment existed for the given swagger_file for this Swagger object\n res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,\n stageName=self._stage_name,\n stageDescription=stage_desc_json,\n description=self.deployment_label_json,\n variables=stage_variables,\n **self._common_aws_args)\n if not res.get('created'):\n ret['abort'] = True\n ret['result'] = False\n ret['comment'] = res.get('error')\n else:\n ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + 
m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. 
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' 
"accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path))
        return six.iteritems(paths)

    @property
    def basePath(self):
        '''
        returns the base path field as defined in the swagger file

        Defaults to an empty string when the swagger file omits basePath.
        '''
        basePath = self._cfg.get('basePath', '')
        return basePath

    @property
    def restApiId(self):
        '''
        returns the rest api id as returned by AWS on creation of the rest api
        '''
        return self._restApiId

    @restApiId.setter
    def restApiId(self, restApiId):
        '''
        allows the assignment of the rest api id on creation of the rest api
        '''
        self._restApiId = restApiId

    @property
    def deployment_label_json(self):
        '''
        this property returns the unique description in pretty printed json for
        a particular api deployment
        '''
        return _dict_to_json_pretty(self.deployment_label)

    @property
    def deployment_label(self):
        '''
        this property returns the deployment label dictionary (mainly used by
        stage description)

        The label ties a deployment to the swagger file contents: it embeds the
        swagger info object, the api name, the swagger file's basename and its
        md5 hash, so a changed swagger file produces a different label.
        '''
        label = dict()

        label['swagger_info_object'] = self.info
        label['api_name'] = self.rest_api_name
        label['swagger_file'] = os.path.basename(self._swagger_file)
        label['swagger_file_md5sum'] = self.md5_filehash

        return label

    # methods to interact with boto_apigateway execution modules
    def _one_or_more_stages_remain(self, deploymentId):
        '''
        Helper function to find whether there are other stages still associated with a deployment

        Returns True if at least one stage references ``deploymentId``.
        '''
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deploymentId,
                                                                 **self._common_aws_args).get('stages')
        return bool(stages)

    def no_more_deployments_remain(self):
        '''
        Helper function to find whether there are deployments left with stages associated

        Returns True only when no deployment of this rest api has any stage
        attached to it (i.e. the api can be torn down safely).
        '''
        no_more_deployments = True
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                deploymentId = deployment.get('id')
                stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('stages')
                # a single stage on any deployment is enough to answer False
                if stages:
                    no_more_deployments = False
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            # honor test=True: report what would happen without deleting
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            # nothing to do, the api is already gone
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret

    def _aws_model_ref_from_swagger_ref(self, r):
        '''
        Helper function to reference models created on aws apigw

        Converts a swagger ``#/definitions/Name`` reference into the
        apigateway model URL AWS expects inside schemas.
        '''
        model_name = r.split('/')[-1]
        return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId,
                                                                                 model_name)

    def _update_schema_to_aws_notation(self, schema):
        '''
        Helper function to map model schema to aws notation

        Walks the schema dict recursively, rewriting every ``$ref`` value via
        _aws_model_ref_from_swagger_ref; returns a new dict, input untouched.
        '''
        result = {}
        for k, v in schema.items():
            if k == '$ref':
                v = self._aws_model_ref_from_swagger_ref(v)
            if isinstance(v, dict):
                v = self._update_schema_to_aws_notation(v)
            result[k] = v
        return result

    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
        '''
        method_params = {}
        method_models = {}
        if 'parameters' in method_data:
            for param in method_data['parameters']:
                p = _Swagger.SwaggerParameter(param)
                if p.name:
                    method_params[p.name] = True
                if p.schema:
                    method_models['application/json'] = p.schema

        # OPTIONS methods are mocked (CORS preflight); all others integrate with AWS lambda
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
        integration_type = "MOCK" if method_name == 'options' else "AWS"

        return {'params': method_params,
                'models': method_models,
                'request_templates': request_templates,
                'integration_type': integration_type}

    def _find_patterns(self, o):
        '''
        Recursively collect every value stored under a ``pattern`` key in the
        given (possibly nested) dict; returns them as a flat list.
        '''
        result = []
        if isinstance(o, dict):
            for k, v in six.iteritems(o):
                if isinstance(v, dict):
                    result.extend(self._find_patterns(v))
                else:
                    if k == 'pattern':
                        result.append(v)
        return result

    def _get_pattern_for_schema(self, schema_name, httpStatus):
        '''
        returns the pattern specified in a response schema

        Falls back to '.+' for 4xx/5xx statuses and '.*' otherwise when the
        model defines no pattern of its own.
        '''
        defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
        model = self._models().get(schema_name)
        patterns = self._find_patterns(model)
        return patterns[0] if patterns else defaultPattern

    def _get_response_template(self, method_name, http_status):
        '''
        Select the integration response template: the user-supplied
        response/error template when configured, otherwise the class default
        for the given method and http status.
        '''
        if method_name == 'options' or not self._is_http_error_rescode(http_status):
            response_templates = {'application/json': self._response_template} \
                if self._response_template else self.RESPONSE_OPTION_TEMPLATE
        else:
            response_templates = {'application/json': self._error_response_template} \
                if self._error_response_template else self.RESPONSE_TEMPLATE
        return response_templates

    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and
        integration_params values needed to configure method response integration/mappings.
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_get_stage_variables
python
def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret
Helper function to retrieve stage variables from pillars/options, if the input is a string
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L342-L365
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + 
m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. 
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' 
"accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
        ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))
        return ret

    def _aws_model_ref_from_swagger_ref(self, r):
        '''
        Helper function to reference models created on aws apigw

        Maps a swagger '$ref' value (e.g. '#/definitions/Foo') to the AWS model URI
        for this rest api, keyed off the last path segment as the model name.
        '''
        model_name = r.split('/')[-1]
        return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)

    def _update_schema_to_aws_notation(self, schema):
        '''
        Helper function to map model schema to aws notation

        Recursively rewrites every '$ref' value in the schema dict into the AWS
        model URI form; all other keys/values are copied through unchanged.
        '''
        result = {}
        for k, v in schema.items():
            if k == '$ref':
                v = self._aws_model_ref_from_swagger_ref(v)
            if isinstance(v, dict):
                v = self._update_schema_to_aws_notation(v)
            result[k] = v
        return result

    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.

        Walks arrays ('items'), direct '$ref' targets (recursively) and object
        'properties'; returns a de-duplicated list of referenced model names.
        '''
        dep_models_list = []

        if obj_schema:
            # default the schema 'type' to 'object' when unspecified, per swagger convention
            obj_schema['type'] = obj_schema.get('type', 'object')
            if obj_schema['type'] == 'array':
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))

    def _build_all_dependencies(self):
        '''
        Helper function to build a map of model to their list of model reference dependencies
        '''
        ret = {}
        for model, schema in six.iteritems(self._models()):
            dep_list = self._build_dependent_model_list(schema)
            ret[model] = dep_list
        return ret

    def _get_model_without_dependencies(self, models_dict):
        '''
        Helper function to find the next model that should be created

        Picks any model whose dependency list is empty, removes it from models_dict
        and from every other model's dependency list (topological-sort step).
        Raises ValueError when no dependency-free model remains but models_dict is
        non-empty (i.e. an undefined or circular dependency).
        '''
        next_model = None
        if not models_dict:
            return next_model

        for model, dependencies in six.iteritems(models_dict):
            if dependencies == []:
                next_model = model
                break

        if next_model is None:
            raise ValueError('incomplete model definitions, models in dependency '
                             'list not defined: {0}'.format(models_dict))

        # remove the model from other dependencies before returning
        models_dict.pop(next_model)
        for model, dep_list in six.iteritems(models_dict):
            if next_model in dep_list:
                dep_list.remove(next_model)

        return next_model

    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''
        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            # _schema = schema.copy()
            _schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
absent
python
def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L368-L443
[ "def no_more_deployments_remain(self):\n '''\n Helper function to find whether there are deployments left with stages associated\n '''\n no_more_deployments = True\n deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,\n **self._common_aws_args).get('deployments')\n if deployments:\n for deployment in deployments:\n deploymentId = deployment.get('id')\n stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,\n deploymentId=deploymentId,\n **self._common_aws_args).get('stages')\n if stages:\n no_more_deployments = False\n break\n\n return no_more_deployments\n", "def delete_stage(self, ret):\n '''\n Method to delete the given stage_name. If the current deployment tied to the given\n stage_name has no other stages associated with it, the deployment will be removed\n as well\n '''\n deploymentId = self._get_current_deployment_id()\n if deploymentId:\n result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,\n stageName=self._stage_name,\n **self._common_aws_args)\n if not result.get('deleted'):\n ret['abort'] = True\n ret['result'] = False\n ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))\n else:\n # check if it is safe to delete the deployment as well.\n if not self._one_or_more_stages_remain(deploymentId):\n result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,\n deploymentId=deploymentId,\n **self._common_aws_args)\n if not result.get('deleted'):\n ret['abort'] = True\n ret['result'] = False\n ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))\n else:\n ret['comment'] = 'stage {0} has been deleted.\\n'.format(self._stage_name)\n else:\n # no matching stage_name/deployment found\n ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json output ''' return 
salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. 
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' 
"cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_gen_md5_filehash
python
def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest()
helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L447-L460
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): 
return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. 
SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' 
"accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_dict_to_json_pretty
python
def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)
helper function to generate pretty printed json output
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L463-L467
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() # Heuristic on whether or not the property name loosely matches given set of 'interesting' 
factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. 
''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' 
"cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_name_matches
python
def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False
Helper function to see if given name has any of the patterns in given matches
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L472-L483
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json 
output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. 
''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' 
"cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_object_reducer
python
def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result
Helper function to reduce the amount of information that will be kept in the change log for API GW related return values
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L486-L511
null
# -*- coding: utf-8 -*-
'''
Manage Apigateway Rest APIs
===========================

.. versionadded:: 2016.11.0

:depends:
  - boto >= 2.8.0
  - boto3 >= 1.2.1
  - botocore >= 1.4.49

Create and destroy rest apis depending on a swagger version 2 definition file.
Be aware that this interacts with Amazon's services, and so may incur charges.

This module uses ``boto3``, which can be installed via package, or pip.

This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.

If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:

.. code-block:: yaml

    vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
    vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:

.. code-block:: yaml

    myprofile:
        keyid: GKTADJGHEIQSXMKKRBJ08H
        key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
        region: us-east-1

.. code-block:: yaml

    Ensure Apigateway API exists:
      boto_apigateway.present:
        - name: myfunction
        - region: us-east-1
        - keyid: GKTADJGHEIQSXMKKRBJ08H
        - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

'''

# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import hashlib
import logging
import os
import re

# Import Salt Libs
import salt.utils.files
import salt.utils.json
import salt.utils.yaml

# Import 3rd-party libs
from salt.ext import six

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only load if boto is available.
    '''
    return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False


def present(name, api_name, swagger_file, stage_name, api_key_required,
            lambda_integration_role, lambda_region=None, stage_variables=None,
            region=None, key=None, keyid=None, profile=None,
            lambda_funcname_format='{stage}_{api}_{resource}_{method}',
            authorization_type='NONE', error_response_template=None, response_template=None):
    '''
    Ensure the specified api_name with the corresponding swaggerfile is deployed to the
    given stage_name in AWS ApiGateway.

    this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is
    handled through a Mock integration.

    There may be multiple deployments for the API object, each deployment is tagged with a description
    (i.e. unique label) in pretty printed json format consisting of the following key/values.

    .. code-block:: text

        {
            "api_name": api_name,
            "swagger_file": basename_of_swagger_file
            "swagger_file_md5sum": md5sum_of_swagger_file,
            "swagger_info_object": info_object_content_in_swagger_file
        }

    Please note that the name of the lambda function to be integrated will be derived
    via the provided lambda_funcname_format parameters:

    - the default lambda_funcname_format is a string with the following
      substitutable keys: "{stage}_{api}_{resource}_{method}".
      The user can choose to reorder the known keys.
    - the stage key corresponds to the stage_name passed in.
    - the api key corresponds to the api_name passed in.
    - the resource corresponds to the resource path defined in the passed swagger file.
    - the method corresponds to the method for a resource path defined in the passed swagger file.

    For the default lambda_funcname_format, given the following input:

    .. code-block:: python

        api_name = ' Test Service'
        stage_name = 'alpha'
        basePath = '/api'
        path = '/a/{b}/c'
        method = 'POST'

    We will end up with the following Lambda Function Name that will be looked
    up: 'test_service_alpha_a_b_c_post'

    The canonicalization of these input parameters is done in the following order:

    1. lambda_funcname_format is formatted with the input parameters as passed,
    2. resulting string is stripped for leading/trailing spaces,
    3. path parameter's curly braces are removed from the resource path,
    4. consecutive spaces and forward slashes in the paths are replaced with '_'
    5. consecutive '_' are replaced with '_'

    Please note that for error response handling, the swagger file must have an error response model
    with the following schema.  The lambda functions should throw exceptions for any non successful responses.
    An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda
    to the proper error return status codes.

    .. code-block:: yaml

        Error:
          type: object
          properties:
            stackTrace:
              type: array
              items:
                type: array
                items:
                  type: string
              description: call stack
            errorType:
              type: string
              description: error type
            errorMessage:
              type: string
              description: |
                Error message, will be matched based on pattern.
                If no pattern is specified, the default pattern used for response mapping will be +*.

    name
        The name of the state definition

    api_name
        The name of the rest api that we want to ensure exists in AWS API Gateway

    swagger_file
        Name of the location of the swagger rest api definition file in YAML format.

    stage_name
        Name of the stage we want to be associated with the given api_name and swagger_file
        definition

    api_key_required
        True or False - whether the API Key is required to call API methods

    lambda_integration_role
        The name or ARN of the IAM role that the AWS ApiGateway assumes when it
        executes your lambda function to handle incoming requests

    lambda_region
        The region where we expect to find the lambda functions.  This is used to
        determine the region where we should look for the Lambda Function for
        integration purposes.  The region determination is based on the following
        priority:

        1. lambda_region as passed in (is not None)
        2. if lambda_region is None, use the region as if a boto_lambda
           function were executed without explicitly specifying lambda region.
        3. if region determined in (2) is different than the region used by
           boto_apigateway functions, a final lookup will be attempted using
           the boto_apigateway region.

    stage_variables
        A dict with variables and their values, or a pillar key (string) that
        contains a dict with variables and their values.
        key and values in the dict must be strings.  {'string': 'string'}

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    lambda_funcname_format
        Please review the earlier example for the usage.  The only substitutable
        keys in the funcname format are {stage}, {api}, {resource}, {method}.
        Any other keys or positional substitution parameters will be flagged as
        an invalid input.

    authorization_type
        This field can be either 'NONE', or 'AWS_IAM'.  This will be applied to all
        methods in the given swagger spec file.  Default is set to 'NONE'

    error_response_template
        String value that defines the response template mapping that should be applied in cases error occurs.
        Refer to AWS documentation for details:
        http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html

        If set to None, the following default value is used:

        .. code-block:: text

            '#set($inputRoot = $input.path(\'$\'))\\n'
            '{\\n'
            '  "errorMessage" : "$inputRoot.errorMessage",\\n'
            '  "errorType" : "$inputRoot.errorType",\\n'
            '  "stackTrace" : [\\n'
            '#foreach($stackTrace in $inputRoot.stackTrace)\\n'
            '  [\\n'
            '#foreach($elem in $stackTrace)\\n'
            '  "$elem"\\n'
            '#if($foreach.hasNext),#end\\n'
            '#end\\n'
            '  ]\\n'
            '#if($foreach.hasNext),#end\\n'
            '#end\\n'
            '  ]\\n'

        .. versionadded:: 2017.7.0

    response_template
        String value that defines the response template mapping applied in case
        of success (including OPTIONS method) If set to None, empty ({}) template
        is assumed, which will transfer response from the lambda function as is.

        .. versionadded:: 2017.7.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])

        # try to open the swagger file and basic validation
        swagger = _Swagger(api_name, stage_name,
                           lambda_funcname_format,
                           swagger_file,
                           error_response_template, response_template,
                           common_args)

        # retrieve stage variables
        stage_vars = _get_stage_variables(stage_variables)

        # verify if api and stage already exists
        ret = swagger.verify_api(ret)
        if ret.get('publish'):
            # there is a deployment label with signature matching the given api_name,
            # swagger file name, swagger file md5 sum, and swagger file info object
            # just reassociate the stage_name to the given deployment label.
            if __opts__['test']:
                ret['comment'] = ('[stage: {0}] will be reassociated to an already available '
                                  'deployment that matched the given [api_name: {1}] '
                                  'and [swagger_file: {2}].\n'
                                  'Stage variables will be set '
                                  'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
                ret['result'] = None
                return ret
            return swagger.publish_api(ret, stage_vars)

        if ret.get('current'):
            # already at desired state for the stage, swagger_file, and api_name
            if __opts__['test']:
                ret['comment'] = ('[stage: {0}] is already at desired state with an associated '
                                  'deployment matching the given [api_name: {1}] '
                                  'and [swagger_file: {2}].\n'
                                  'Stage variables will be set '
                                  'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
                ret['result'] = None
            return swagger.overwrite_stage_variables(ret, stage_vars)

        # there doesn't exist any previous deployments for the given swagger_file, we need
        # to redeploy the content of the swagger file to the api, models, and resources object
        # and finally create a new deployment and tie the stage_name to this new deployment
        if __opts__['test']:
            ret['comment'] = ('There is no deployment matching the given [api_name: {0}] '
                              'and [swagger_file: {1}].  A new deployment will be '
                              'created and the [stage_name: {2}] will then be associated '
                              'to the newly created deployment.\n'
                              'Stage variables will be set '
                              'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars))
            ret['result'] = None
            return ret

        ret = swagger.deploy_api(ret)
        if ret.get('abort'):
            return ret

        ret = swagger.deploy_models(ret)
        if ret.get('abort'):
            return ret

        ret = swagger.deploy_resources(ret,
                                       api_key_required=api_key_required,
                                       lambda_integration_role=lambda_integration_role,
                                       lambda_region=lambda_region,
                                       authorization_type=authorization_type)
        if ret.get('abort'):
            return ret

        ret = swagger.publish_api(ret, stage_vars)

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret


def _get_stage_variables(stage_variables):
    '''
    Helper function to retrieve stage variables from pillars/options, if the
    input is a string
    '''
    ret = dict()
    if stage_variables is None:
        return ret

    if isinstance(stage_variables, six.string_types):
        if stage_variables in __opts__:
            ret = __opts__[stage_variables]
        master_opts = __pillar__.get('master', {})
        if stage_variables in master_opts:
            ret = master_opts[stage_variables]
        if stage_variables in __pillar__:
            ret = __pillar__[stage_variables]
    elif isinstance(stage_variables, dict):
        ret = stage_variables

    if not isinstance(ret, dict):
        ret = dict()

    return ret


def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the stage_name associated with the given api_name deployed by boto_apigateway's
    present state is removed.  If the currently associated deployment to the given stage_name has
    no other stages associated with it, the deployment will also be removed.

    name
        Name of the swagger file in YAML format

    api_name
        Name of the rest api on AWS ApiGateway to ensure is absent.

    stage_name
        Name of the stage to be removed irrespective of the swagger file content.
        If the current deployment associated with the stage_name has no other stages associated
        with it, the deployment will also be removed.

    nuke_api
        If True, removes the API itself only if there are no other stages associated with any other
        deployments once the given stage_name is removed.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])

        swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args)

        if not swagger.restApiId:
            ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name)
            return ret

        if __opts__['test']:
            if nuke_api:
                ret['comment'] = ('[stage: {0}] will be deleted, if there are no other '
                                  'active stages, the [api: {1} will also be '
                                  'deleted.'.format(stage_name, api_name))
            else:
                ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name))
            ret['result'] = None
            return ret

        ret = swagger.delete_stage(ret)

        if ret.get('abort'):
            return ret

        if nuke_api and swagger.no_more_deployments_remain():
            ret = swagger.delete_api(ret)

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret
# Helper Swagger Class for swagger version 2.0 API specification
def _gen_md5_filehash(fname, *args):
    '''
    helper function to generate a md5 hash of the swagger definition file
    any extra argument passed to the function is converted to a string
    and participates in the hash calculation
    '''
    _hash = hashlib.md5()
    with salt.utils.files.fopen(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            _hash.update(chunk)

    for extra_arg in args:
        _hash.update(six.b(str(extra_arg)))
    return _hash.hexdigest()


def _dict_to_json_pretty(d, sort_keys=True):
    '''
    helper function to generate pretty printed json output
    '''
    return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)


# Heuristic on whether or not the property name loosely matches given set of 'interesting' factors
# If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match
def _name_matches(name, matches):
    '''
    Helper function to see if given name has any of the patterns in given matches
    '''
    for m in matches:
        if name.endswith(m):
            return True
        if name.lower().endswith('_' + m.lower()):
            return True
        if name.lower() == m.lower():
            return True
    return False


def _log_changes(ret, changekey, changevalue):
    '''
    For logging create/update/delete operations to AWS ApiGateway
    '''
    cl = ret['changes'].get('new', [])
    cl.append({changekey: _object_reducer(changevalue)})
    ret['changes']['new'] = cl
    return ret


def _log_error_and_abort(ret, obj):
    '''
    helper function to update errors in the return structure
    '''
    ret['result'] = False
    ret['abort'] = True
    if 'error' in obj:
        ret['comment'] = '{0}'.format(obj.get('error'))
    return ret


class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'

    # AWS integration templates for normal and options methods
    REQUEST_TEMPLATE = {'application/json':
                        '#set($inputRoot = $input.path(\'$\'))\n'
                        '{\n'
                        '"header_params" : {\n'
                        '#set ($map = $input.params().header)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"query_params" : {\n'
                        '#set ($map = $input.params().querystring)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"path_params" : {\n'
                        '#set ($map = $input.params().path)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"apigw_context" : {\n'
                        '"apiId": "$context.apiId",\n'
                        '"httpMethod": "$context.httpMethod",\n'
                        '"requestId": "$context.requestId",\n'
                        '"resourceId": "$context.resourceId",\n'
                        '"resourcePath": "$context.resourcePath",\n'
                        '"stage": "$context.stage",\n'
                        '"identity": {\n'
                        ' "user":"$context.identity.user",\n'
                        ' "userArn":"$context.identity.userArn",\n'
                        ' "userAgent":"$context.identity.userAgent",\n'
                        ' "sourceIp":"$context.identity.sourceIp",\n'
                        ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
                        ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
                        ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
                        ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
                        ' "caller":"$context.identity.caller",\n'
                        ' "apiKey":"$context.identity.apiKey",\n'
                        ' "accountId":"$context.identity.accountId"\n'
                        '}\n'
                        '},\n'
                        '"body_params" : $input.json(\'$\'),\n'
                        '"stage_variables": {\n'
                        '#foreach($variable in $stageVariables.keySet())\n'
                        '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
                        '#if($foreach.hasNext), #end\n'
                        '#end\n'
                        '}\n'
                        '}'}
    REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}

    # AWS integration response template mapping to convert stackTrace part or the error
    # to a uniform format containing strings only. Swagger does not seem to allow defining
    # an array of non-uniform types, to it is not possible to create error model to match
    # exactly what comes out of lambda functions in case of error.
    RESPONSE_TEMPLATE = {'application/json':
                         '#set($inputRoot = $input.path(\'$\'))\n'
                         '{\n'
                         '  "errorMessage" : "$inputRoot.errorMessage",\n'
                         '  "errorType" : "$inputRoot.errorType",\n'
                         '  "stackTrace" : [\n'
                         '#foreach($stackTrace in $inputRoot.stackTrace)\n'
                         '    [\n'
                         '#foreach($elem in $stackTrace)\n'
                         '      "$elem"\n'
                         '#if($foreach.hasNext),#end\n'
                         '#end\n'
                         '    ]\n'
                         '#if($foreach.hasNext),#end\n'
                         '#end\n'
                         '  ]\n'
                         '}'}
    RESPONSE_OPTION_TEMPLATE = {}

    # This string should not be modified, every API created by this state will carry the description
    # below.
    AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State",
                                                "context": "See deployment or stage description"})

    class SwaggerParameter(object):
        '''
        This is a helper class for the Swagger Parameter Object
        '''
        LOCATIONS = ('body', 'query', 'header', 'path')

        def __init__(self, paramdict):
            self._paramdict = paramdict

        @property
        def location(self):
            '''
            returns location in the swagger parameter object
            '''
            _location = self._paramdict.get('in')
            if _location in _Swagger.SwaggerParameter.LOCATIONS:
                return _location
            raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))

        @property
        def name(self):
            '''
            returns parameter name in the swagger parameter object
            '''
            _name = self._paramdict.get('name')
            if _name:
                if self.location == 'header':
                    return 'method.request.header.{0}'.format(_name)
                elif self.location == 'query':
                    return 'method.request.querystring.{0}'.format(_name)
                elif self.location == 'path':
                    return 'method.request.path.{0}'.format(_name)
                return None
            raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))

        @property
        def schema(self):
            '''
            returns the name of the schema given the reference in the swagger parameter object
            '''
            if self.location == 'body':
                _schema = self._paramdict.get('schema')
                if _schema:
                    if '$ref' in _schema:
                        schema_name = _schema.get('$ref').split('/')[-1]
                        return schema_name
                    raise ValueError(('Body parameter must have a JSON reference '
                                      'to the schema definition due to Amazon API restrictions: {0}'.format(self.name)))
                raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
            return None

    class SwaggerMethodResponse(object):
        '''
        Helper class for Swagger Method Response Object
        '''
        def __init__(self, r):
            self._r = r

        @property
        def schema(self):
            '''
            returns the name of the schema given the reference in the swagger method response object
            '''
            _schema = self._r.get('schema')
            if _schema:
                if '$ref' in _schema:
                    return _schema.get('$ref').split('/')[-1]
                raise ValueError(('Method response must have a JSON reference '
                                  'to the schema definition: {0}'.format(_schema)))
            return None

        @property
        def headers(self):
            '''
            returns the headers dictionary in the method response object
            '''
            _headers = self._r.get('headers', {})
            return _headers
    def __init__(self, api_name, stage_name, lambda_funcname_format,
                 swagger_file_path, error_response_template, response_template,
                 common_aws_args):
        self._api_name = api_name
        self._stage_name = stage_name
        self._lambda_funcname_format = lambda_funcname_format
        self._common_aws_args = common_aws_args
        self._restApiId = ''
        self._deploymentId = ''
        self._error_response_template = error_response_template
        self._response_template = response_template

        if swagger_file_path is not None:
            if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path):
                self._swagger_file = swagger_file_path
                self._md5_filehash = _gen_md5_filehash(self._swagger_file,
                                                       error_response_template,
                                                       response_template)
                with salt.utils.files.fopen(self._swagger_file, 'rb') as sf:
                    self._cfg = salt.utils.yaml.safe_load(sf)
                self._swagger_version = ''
            else:
                raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path))

            self._validate_swagger_file()

        self._validate_lambda_funcname_format()

        self._resolve_api_id()

    def _is_http_error_rescode(self, code):
        '''
        Helper function to determine if the passed code is in the
        400~599 range of http error codes
        '''
        return bool(re.match(r'^\s*[45]\d\d\s*$', code))

    def _validate_error_response_model(self, paths, mods):
        '''
        Helper function to help validate the convention established in the swagger file on how
        to handle response code mapping/integration
        '''
        for path, ops in paths:
            for opname, opobj in six.iteritems(ops):
                if opname not in _Swagger.SWAGGER_OPERATION_NAMES:
                    continue

                if 'responses' not in opobj:
                    raise ValueError('missing mandatory responses field in path item object')
                for rescode, resobj in six.iteritems(opobj.get('responses')):
                    if not self._is_http_error_rescode(str(rescode)):  # future lint: disable=blacklisted-function
                        continue

                    # only check for response code from 400-599
                    if 'schema' not in resobj:
                        raise ValueError('missing schema field in path {0}, '
                                         'op {1}, response {2}'.format(path, opname, rescode))

                    schemaobj = resobj.get('schema')
                    if '$ref' not in schemaobj:
                        raise ValueError('missing $ref field under schema in '
                                         'path {0}, op {1}, response {2}'.format(path, opname, rescode))
                    schemaobjref = schemaobj.get('$ref', '/')
                    modelname = schemaobjref.split('/')[-1]

                    if modelname not in mods:
                        raise ValueError('model schema {0} reference not found '
                                         'under /definitions'.format(schemaobjref))
                    model = mods.get(modelname)

                    if model.get('type') != 'object':
                        raise ValueError('model schema {0} must be type object'.format(modelname))
                    if 'properties' not in model:
                        raise ValueError('model schema {0} must have properties fields'.format(modelname))

                    modelprops = model.get('properties')
                    if 'errorMessage' not in modelprops:
                        raise ValueError('model schema {0} must have errorMessage as a property to '
                                         'match AWS convention. If pattern is not set, .+ will '
                                         'be used'.format(modelname))

    def _validate_lambda_funcname_format(self):
        '''
        Checks if the lambda function name format contains only known elements
        :return: True on success, ValueError raised on error
        '''
        try:
            if self._lambda_funcname_format:
                known_kwargs = dict(stage='',
                                    api='',
                                    resource='',
                                    method='')
                self._lambda_funcname_format.format(**known_kwargs)
            return True
        except Exception:
            raise ValueError('Invalid lambda_funcname_format {0}.  Please review '
                             'documentation for known substitutable keys'.format(self._lambda_funcname_format))
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
    @property
    def basePath(self):
        '''
        returns the base path field as defined in the swagger file
        '''
        basePath = self._cfg.get('basePath', '')
        return basePath

    @property
    def restApiId(self):
        '''
        returns the rest api id as returned by AWS on creation of the rest api
        '''
        return self._restApiId

    @restApiId.setter
    def restApiId(self, restApiId):
        '''
        allows the assignment of the rest api id on creation of the rest api
        '''
        self._restApiId = restApiId

    @property
    def deployment_label_json(self):
        '''
        this property returns the unique description in pretty printed json for
        a particular api deployment
        '''
        return _dict_to_json_pretty(self.deployment_label)

    @property
    def deployment_label(self):
        '''
        this property returns the deployment label dictionary (mainly used by
        stage description)
        '''
        label = dict()

        label['swagger_info_object'] = self.info
        label['api_name'] = self.rest_api_name
        label['swagger_file'] = os.path.basename(self._swagger_file)
        label['swagger_file_md5sum'] = self.md5_filehash

        return label

    # methods to interact with boto_apigateway execution modules
    def _one_or_more_stages_remain(self, deploymentId):
        '''
        Helper function to find whether there are other stages still associated with a deployment
        '''
        stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                 deploymentId=deploymentId,
                                                                 **self._common_aws_args).get('stages')
        return bool(stages)

    def no_more_deployments_remain(self):
        '''
        Helper function to find whether there are deployments left with stages associated
        '''
        no_more_deployments = True
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                deploymentId = deployment.get('id')
                stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('stages')
                if stages:
                    no_more_deployments = False
                    break

        return no_more_deployments

    def _get_current_deployment_id(self):
        '''
        Helper method to find the deployment id that the stage name is currently associated with.
        '''
        deploymentId = ''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if stage:
            deploymentId = stage.get('deploymentId')
        return deploymentId

    def _get_current_deployment_label(self):
        '''
        Helper method to find the deployment label that the stage_name is currently associated with.
        '''
        deploymentId = self._get_current_deployment_id()
        deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId,
                                                                         deploymentId=deploymentId,
                                                                         **self._common_aws_args).get('deployment')
        if deployment:
            return deployment.get('description')
        return None

    def _get_desired_deployment_id(self):
        '''
        Helper method to return the deployment id matching the desired deployment label for
        this Swagger object based on the given api_name, swagger_file
        '''
        deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId,
                                                                           **self._common_aws_args).get('deployments')
        if deployments:
            for deployment in deployments:
                if deployment.get('description') == self.deployment_label_json:
                    return deployment.get('id')
        return ''

    def overwrite_stage_variables(self, ret, stage_variables):
        '''
        overwrite the given stage_name's stage variables with the given stage_variables
        '''
        res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                        stageName=self._stage_name,
                                                                        variables=stage_variables,
                                                                        **self._common_aws_args)
        if not res.get('overwrite'):
            ret['result'] = False
            ret['abort'] = True
            ret['comment'] = res.get('error')
        else:
            ret = _log_changes(ret,
                               'overwrite_stage_variables',
                               res.get('stage'))
        return ret

    def _set_current_deployment(self, stage_desc_json, stage_variables):
        '''
        Helper method to associate the stage_name to the given deploymentId and make this current
        '''
        stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId,
                                                               stageName=self._stage_name,
                                                               **self._common_aws_args).get('stage')
        if not stage:
            stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId,
                                                                 stageName=self._stage_name,
                                                                 deploymentId=self._deploymentId,
                                                                 description=stage_desc_json,
                                                                 variables=stage_variables,
                                                                 **self._common_aws_args)
            if not stage.get('stage'):
                return {'set': False, 'error': stage.get('error')}
        else:
            # overwrite the stage variables
            overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId,
                                                                                  stageName=self._stage_name,
                                                                                  variables=stage_variables,
                                                                                  **self._common_aws_args)
            if not overwrite.get('stage'):
                return {'set': False, 'error': overwrite.get('error')}

        return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId,
                                                                   stageName=self._stage_name,
                                                                   deploymentId=self._deploymentId,
                                                                   **self._common_aws_args)
    def _resolve_api_id(self):
        '''
        returns an Api Id that matches the given api_name and the hardcoded
        _Swagger.AWS_API_DESCRIPTION as the api description
        '''
        apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                         **self._common_aws_args).get('restapi')
        if apis:
            if len(apis) == 1:
                self.restApiId = apis[0].get('id')
            else:
                raise ValueError('Multiple APIs matching given name {0} and '
                                 'description {1}'.format(self.rest_api_name, self.info_json))

    def delete_stage(self, ret):
        '''
        Method to delete the given stage_name.  If the current deployment tied to the given
        stage_name has no other stages associated with it, the deployment will be removed
        as well
        '''
        deploymentId = self._get_current_deployment_id()
        if deploymentId:
            result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId,
                                                                  stageName=self._stage_name,
                                                                  **self._common_aws_args)
            if not result.get('deleted'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
            else:
                # check if it is safe to delete the deployment as well.
                if not self._one_or_more_stages_remain(deploymentId):
                    result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                               deploymentId=deploymentId,
                                                                               **self._common_aws_args)
                    if not result.get('deleted'):
                        ret['abort'] = True
                        ret['result'] = False
                        ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
        else:
            # no matching stage_name/deployment found
            ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

        return ret

    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.

        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.
        '''

        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name,
                                                                  deployed_label_json))
                ret['current'] = True
                return ret
            else:
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    ret['publish'] = True
        return ret
    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)

        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret

    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        return {'deleted': True}

    def deploy_api(self, ret):
        '''
        this method create the top level rest api in AWS apigateway
        '''
        if self.restApiId:
            res = self._cleanup_api()
            if not res.get('deleted'):
                ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
                ret['abort'] = True
                ret['result'] = False
                return ret
            return ret

        response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                          description=_Swagger.AWS_API_DESCRIPTION,
                                                          **self._common_aws_args)

        if not response.get('created'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in response:
                ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
            return ret

        self.restApiId = response.get('restapi', {}).get('id')

        return _log_changes(ret, 'deploy_api', response.get('restapi'))

    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack
        '''

        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret

    def _aws_model_ref_from_swagger_ref(self, r):
        '''
        Helper function to reference models created on aws apigw
        '''
        model_name = r.split('/')[-1]
        return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId,
                                                                                 model_name)

    def _update_schema_to_aws_notation(self, schema):
        '''
        Helper function to map model schema to aws notation
        '''
        result = {}
        for k, v in schema.items():
            if k == '$ref':
                v = self._aws_model_ref_from_swagger_ref(v)
            if isinstance(v, dict):
                v = self._update_schema_to_aws_notation(v)
            result[k] = v
        return result

    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_log_changes
python
def _log_changes(ret, changekey, changevalue):
    '''
    For logging create/update/delete operations to AWS ApiGateway

    Records a single change under ``ret['changes']['new']`` as a
    ``{changekey: reduced_changevalue}`` entry, creating the list on
    first use, and hands the mutated ``ret`` dict back to the caller.
    '''
    # setdefault creates the 'new' list the first time through and
    # returns the existing list afterwards, so one append suffices.
    entries = ret['changes'].setdefault('new', [])
    entries.append({changekey: _object_reducer(changevalue)})
    return ret
For logging create/update/delete operations to AWS ApiGateway
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L514-L521
[ "def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod',\n 'statusCode', 'Created', 'Deleted',\n 'Updated', 'Flushed', 'Associated', 'Disassociated')):\n '''\n Helper function to reduce the amount of information that will be kept in the change log\n for API GW related return values\n '''\n result = {}\n if isinstance(o, dict):\n for k, v in six.iteritems(o):\n if isinstance(v, dict):\n reduced = v if k == 'variables' else _object_reducer(v, names)\n if reduced or _name_matches(k, names):\n result[k] = reduced\n elif isinstance(v, list):\n newlist = []\n for val in v:\n reduced = _object_reducer(val, names)\n if reduced or _name_matches(k, names):\n newlist.append(reduced)\n if newlist:\n result[k] = newlist\n else:\n if _name_matches(k, names):\n result[k] = v\n return result\n" ]
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json 
output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. 
''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' 
"cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_log_error_and_abort
python
def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret
helper function to update errors in the return structure
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L524-L532
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
    If the current deployment associated with the stage_name has
    no other stages associated with it, the deployment will also be removed.

    nuke_api
        If True, removes the API itself only if there are no other stages associated with any other
        deployments once the given stage_name is removed.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    # Standard Salt state return skeleton; filled in below as we progress.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])
        # No swagger file is needed for deletion; only api_name/stage_name matter here.
        swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args)

        # Bail out early (success) when the named REST API was never provisioned.
        if not swagger.restApiId:
            ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name)
            return ret

        if __opts__['test']:
            # Dry-run: report what would be removed without touching AWS.
            if nuke_api:
                ret['comment'] = ('[stage: {0}] will be deleted, if there are no other '
                                  'active stages, the [api: {1} will also be '
                                  'deleted.'.format(stage_name, api_name))
            else:
                ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name))
            ret['result'] = None
            return ret

        # Remove the stage (and its deployment when no other stage references it).
        ret = swagger.delete_stage(ret)

        if ret.get('abort'):
            return ret

        # Optionally remove the whole API once nothing is deployed any more.
        if nuke_api and swagger.no_more_deployments_remain():
            ret = swagger.delete_api(ret)

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret


# Helper Swagger Class for swagger version 2.0 API specification
def _gen_md5_filehash(fname, *args):
    '''
    helper function to generate a md5 hash of the swagger definition file
    any extra argument passed to the function is converted to a string
    and participates in the hash calculation
    '''
    _hash = hashlib.md5()
    # Hash the file in 4 KiB chunks so large swagger files don't need to fit in memory.
    with salt.utils.files.fopen(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            _hash.update(chunk)

    # Fold the extra arguments (e.g. response templates) into the digest so a
    # template change produces a new deployment label even if the file is unchanged.
    for extra_arg in args:
        _hash.update(six.b(str(extra_arg)))
    return _hash.hexdigest()


def _dict_to_json_pretty(d, sort_keys=True):
    '''
    helper function to generate pretty printed json
    output
    '''
    return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)


# Heuristic on whether or not the property name loosely matches given set of 'interesting' factors
# If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match
def _name_matches(name, matches):
    '''
    Helper function to see if given name has any of the patterns in given matches
    '''
    for m in matches:
        # exact suffix match, e.g. 'blahId' matches 'Id'
        if name.endswith(m):
            return True
        # snake_case suffix match, e.g. 'blah_id' matches 'id'
        if name.lower().endswith('_' + m.lower()):
            return True
        # case-insensitive whole-name match, e.g. 'ID' matches 'id'
        if name.lower() == m.lower():
            return True
    return False


def _object_reducer(o,
                    names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created',
                           'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')):
    '''
    Helper function to reduce the amount of information that will be kept in the change log
    for API GW related return values
    '''
    result = {}
    if isinstance(o, dict):
        for k, v in six.iteritems(o):
            if isinstance(v, dict):
                # stage 'variables' dicts are kept verbatim; other nested dicts
                # are recursively reduced to the interesting keys only.
                reduced = v if k == 'variables' else _object_reducer(v, names)
                if reduced or _name_matches(k, names):
                    result[k] = reduced
            elif isinstance(v, list):
                # reduce each list element individually, keeping non-empty results
                newlist = []
                for val in v:
                    reduced = _object_reducer(val, names)
                    if reduced or _name_matches(k, names):
                        newlist.append(reduced)
                if newlist:
                    result[k] = newlist
            else:
                # scalar values survive only when the key itself is interesting
                if _name_matches(k, names):
                    result[k] = v
    return result


def _log_changes(ret, changekey, changevalue):
    '''
    For logging create/update/delete operations to AWS ApiGateway
    '''
    # Append a reduced view of the AWS response under ret['changes']['new'].
    cl = ret['changes'].get('new', [])
    cl.append({changekey: _object_reducer(changevalue)})
    ret['changes']['new'] = cl
    return ret


class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' 
"cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the 
apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages 
already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
usage_plan_present
python
def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret 
ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1688-L1826
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json 
output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. 
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' 
"cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
def deploy_resources(self, ret, api_key_required, lambda_integration_role,
                     lambda_region, authorization_type):
    '''
    Deploy every resource (and its recognized operations) defined in the
    swagger file.

    ret
        a dictionary for returning status to Saltstack

    api_key_required
        True or False, whether api key is required to access this method.

    lambda_integration_role
        name of the IAM role or IAM role arn that Api Gateway will assume when executing
        the associated lambda function

    lambda_region
        the region for the lambda function that Api Gateway will integrate to.

    authorization_type
        'NONE' or 'AWS_IAM'
    '''
    for path, path_ops in self.paths:
        resource = __salt__['boto_apigateway.create_api_resources'](
            restApiId=self.restApiId, path=path, **self._common_aws_args)

        if not resource.get('created'):
            return _log_error_and_abort(ret, resource)
        ret = _log_changes(ret, 'deploy_resources', resource)

        # Only recognized swagger operations (get/post/...) become methods.
        for op_name, op_data in six.iteritems(path_ops):
            if op_name not in _Swagger.SWAGGER_OPERATION_NAMES:
                continue
            ret = self._deploy_method(ret, path, op_name, op_data,
                                      api_key_required, lambda_integration_role,
                                      lambda_region, authorization_type)
    return ret
def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None):
    '''
    Ensures usage plan identified by name is added to provided api_stages

    .. versionadded:: 2017.7.0

    name
        name of the state

    plan_name
        name of the plan to use

    api_stages
        list of dictionaries, where each dictionary consists of the following keys:

        apiId
            apiId of the api to attach usage plan to

        stage
            stage name of the api to attach usage plan to

    .. code-block:: yaml

        UsagePlanAssociationPresent:
          boto_apigateway.usage_plan_association_present:
            - plan_name: my_plan
            - api_stages:
              - apiId: 9kb0404ec0
                stage: my_stage
              - apiId: l9v7o2aj90
                stage: my_stage
            - profile: my_profile
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])

        # the plan must already exist, and be unambiguous by name, before
        # stages can be attached to it
        existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
        if 'error' in existing:
            ret['result'] = False
            ret['comment'] = 'Failed to describe existing usage plans'
            return ret

        if not existing['plans']:
            ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name)
            ret['result'] = False
            return ret

        if len(existing['plans']) != 1:
            ret['comment'] = 'There are multiple usage plans with the same name - it is not supported'
            ret['result'] = False
            return ret

        plan = existing['plans'][0]
        plan_id = plan['id']
        plan_stages = plan.get('apiStages', [])

        # attach only the stages that are not already associated
        stages_to_add = [api for api in api_stages if api not in plan_stages]

        if not stages_to_add:
            # fixed typo in user-facing message: was 'asssociated'
            ret['comment'] = 'Usage plan is already associated to all api stages'
            return ret

        result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args)
        if 'error' in result:
            ret['comment'] = 'Failed to associate a usage plan {0} to the apis {1}, {2}'.format(plan_name,
                                                                                               stages_to_add,
                                                                                               result['error'])
            ret['result'] = False
            return ret

        ret['comment'] = 'successfully associated usage plan to apis'
        ret['changes']['old'] = plan_stages
        ret['changes']['new'] = result.get('result', {}).get('apiStages', [])

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret
def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None):
    '''
    Ensures usage plan identified by name is removed from provided api_stages
    If a plan is associated to stages not listed in api_stages parameter,
    those associations remain intact.

    .. versionadded:: 2017.7.0

    name
        name of the state

    plan_name
        name of the plan to use

    api_stages
        list of dictionaries, where each dictionary consists of the following keys:

        apiId
            apiId of the api to detach usage plan from

        stage
            stage name of the api to detach usage plan from

    .. code-block:: yaml

        UsagePlanAssociationAbsent:
          boto_apigateway.usage_plan_association_absent:
            - plan_name: my_plan
            - api_stages:
              - apiId: 9kb0404ec0
                stage: my_stage
              - apiId: l9v7o2aj90
                stage: my_stage
            - profile: my_profile
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])

        # the plan must already exist, and be unambiguous by name, before
        # stages can be detached from it
        existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args)
        if 'error' in existing:
            ret['result'] = False
            ret['comment'] = 'Failed to describe existing usage plans'
            return ret

        if not existing['plans']:
            ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name)
            ret['result'] = False
            return ret

        if len(existing['plans']) != 1:
            ret['comment'] = 'There are multiple usage plans with the same name - it is not supported'
            ret['result'] = False
            return ret

        plan = existing['plans'][0]
        plan_id = plan['id']
        plan_stages = plan.get('apiStages', [])

        if not plan_stages:
            ret['comment'] = 'Usage plan {0} has no associated stages already'.format(plan_name)
            return ret

        # detach only the stages that are actually associated
        stages_to_remove = [api for api in api_stages if api in plan_stages]

        if not stages_to_remove:
            # fixed typo in user-facing message: was 'asssociated'
            ret['comment'] = 'Usage plan is already not associated to any api stages'
            return ret

        result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args)
        if 'error' in result:
            ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name,
                                                                                                     stages_to_remove,
                                                                                                     result['error'])
            ret['result'] = False
            return ret

        ret['comment'] = 'successfully disassociated usage plan from apis'
        ret['changes']['old'] = plan_stages
        ret['changes']['new'] = result.get('result', {}).get('apiStages', [])

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret
saltstack/salt
salt/states/boto_apigateway.py
usage_plan_absent
python
def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None):
    '''
    Ensures usage plan identified by name is no longer present

    .. versionadded:: 2017.7.0

    name
        name of the state

    plan_name
        name of the plan to remove

    .. code-block:: yaml

        usage plan absent:
          boto_apigateway.usage_plan_absent:
            - plan_name: my_usage_plan
            - profile: my_profile
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    try:
        conn_args = dict([('region', region),
                          ('key', key),
                          ('keyid', keyid),
                          ('profile', profile)])

        existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **conn_args)
        if 'error' in existing:
            ret['result'] = False
            ret['comment'] = 'Failed to describe existing usage plans'
            return ret

        if not existing['plans']:
            # already gone, nothing to do
            ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name)
            return ret

        if __opts__['test']:
            ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name)
            ret['result'] = None
            return ret

        plan = existing['plans'][0]
        result = __salt__['boto_apigateway.delete_usage_plan'](plan['id'], **conn_args)
        if 'error' in result:
            ret['result'] = False
            ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result)
            return ret

        ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name)
        ret['changes']['old'] = {'plan': plan}
        ret['changes']['new'] = {'plan': None}

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret
Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1829-L1892
null
def __virtual__():
    '''
    Only load if boto is available.
    '''
    if 'boto_apigateway.describe_apis' in __salt__:
        return 'boto_apigateway'
    return False
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
if __opts__['test']: ret['comment'] = ('[stage: {0}] will be reassociated to an already available ' 'deployment that matched the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return ret return swagger.publish_api(ret, stage_vars) if ret.get('current'): # already at desired state for the stage, swagger_file, and api_name if __opts__['test']: ret['comment'] = ('[stage: {0}] is already at desired state with an associated ' 'deployment matching the given [api_name: {1}] ' 'and [swagger_file: {2}].\n' 'Stage variables will be set ' 'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars)) ret['result'] = None return swagger.overwrite_stage_variables(ret, stage_vars) # there doesn't exist any previous deployments for the given swagger_file, we need # to redeploy the content of the swagger file to the api, models, and resources object # and finally create a new deployment and tie the stage_name to this new deployment if __opts__['test']: ret['comment'] = ('There is no deployment matching the given [api_name: {0}] ' 'and [swagger_file: {1}]. 
A new deployment will be ' 'created and the [stage_name: {2}] will then be associated ' 'to the newly created deployment.\n' 'Stage variables will be set ' 'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars)) ret['result'] = None return ret ret = swagger.deploy_api(ret) if ret.get('abort'): return ret ret = swagger.deploy_models(ret) if ret.get('abort'): return ret ret = swagger.deploy_resources(ret, api_key_required=api_key_required, lambda_integration_role=lambda_integration_role, lambda_region=lambda_region, authorization_type=authorization_type) if ret.get('abort'): return ret ret = swagger.publish_api(ret, stage_vars) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def _get_stage_variables(stage_variables): ''' Helper function to retrieve stage variables from pillars/options, if the input is a string ''' ret = dict() if stage_variables is None: return ret if isinstance(stage_variables, six.string_types): if stage_variables in __opts__: ret = __opts__[stage_variables] master_opts = __pillar__.get('master', {}) if stage_variables in master_opts: ret = master_opts[stage_variables] if stage_variables in __pillar__: ret = __pillar__[stage_variables] elif isinstance(stage_variables, dict): ret = stage_variables if not isinstance(ret, dict): ret = dict() return ret def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the stage_name associated with the given api_name deployed by boto_apigateway's present state is removed. If the currently associated deployment to the given stage_name has no other stages associated with it, the deployment will also be removed. name Name of the swagger file in YAML format api_name Name of the rest api on AWS ApiGateway to ensure is absent. stage_name Name of the stage to be removed irrespective of the swagger file content. 
If the current deployment associated with the stage_name has no other stages associated with it, the deployment will also be removed. nuke_api If True, removes the API itself only if there are no other stages associated with any other deployments once the given stage_name is removed. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args) if not swagger.restApiId: ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name) return ret if __opts__['test']: if nuke_api: ret['comment'] = ('[stage: {0}] will be deleted, if there are no other ' 'active stages, the [api: {1} will also be ' 'deleted.'.format(stage_name, api_name)) else: ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name)) ret['result'] = None return ret ret = swagger.delete_stage(ret) if ret.get('abort'): return ret if nuke_api and swagger.no_more_deployments_remain(): ret = swagger.delete_api(ret) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret # Helper Swagger Class for swagger version 2.0 API specification def _gen_md5_filehash(fname, *args): ''' helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ''' _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): _hash.update(chunk) for extra_arg in args: _hash.update(six.b(str(extra_arg))) return _hash.hexdigest() def _dict_to_json_pretty(d, sort_keys=True): ''' helper function to generate pretty printed json 
output ''' return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys) # Heuristic on whether or not the property name loosely matches given set of 'interesting' factors # If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match def _name_matches(name, matches): ''' Helper function to see if given name has any of the patterns in given matches ''' for m in matches: if name.endswith(m): return True if name.lower().endswith('_' + m.lower()): return True if name.lower() == m.lower(): return True return False def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted', 'Updated', 'Flushed', 'Associated', 'Disassociated')): ''' Helper function to reduce the amount of information that will be kept in the change log for API GW related return values ''' result = {} if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): reduced = v if k == 'variables' else _object_reducer(v, names) if reduced or _name_matches(k, names): result[k] = reduced elif isinstance(v, list): newlist = [] for val in v: reduced = _object_reducer(val, names) if reduced or _name_matches(k, names): newlist.append(reduced) if newlist: result[k] = newlist else: if _name_matches(k, names): result[k] = v return result def _log_changes(ret, changekey, changevalue): ''' For logging create/update/delete operations to AWS ApiGateway ''' cl = ret['changes'].get('new', []) cl.append({changekey: _object_reducer(changevalue)}) ret['changes']['new'] = cl return ret def _log_error_and_abort(ret, obj): ''' helper function to update errors in the return structure ''' ret['result'] = False ret['abort'] = True if 'error' in obj: ret['comment'] = '{0}'.format(obj.get('error')) return ret class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. 
The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' "userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' 
"cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. 
code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those 
associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = 
plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
usage_plan_association_present
python
def usage_plan_association_present(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) stages_to_add = [] for api in api_stages: if api not in plan_stages: stages_to_add.append(api) if not stages_to_add: ret['comment'] = 'Usage plan is already asssociated to all api stages' return ret result = __salt__['boto_apigateway.attach_usage_plan_to_apis'](plan_id, stages_to_add, **common_args) if 'error' in result: ret['comment'] = 'Failed to associate a usage plan {0} to the apis {1}, {2}'.format(plan_name, stages_to_add, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully associated usage plan to apis' 
ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
Ensures usage plan identified by name is added to provided api_stages .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to attach usage plan to stage stage name of the api to attach usage plan to .. code-block:: yaml UsagePlanAssociationPresent: boto_apigateway.usage_plan_association_present: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L1895-L1985
null
# -*- coding: utf-8 -*- ''' Manage Apigateway Rest APIs =========================== .. versionadded:: 2016.11.0 :depends: - boto >= 2.8.0 - boto3 >= 1.2.1 - botocore >= 1.4.49 Create and destroy rest apis depending on a swagger version 2 definition file. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses ``boto3``, which can be installed via package, or pip. This module accepts explicit vpc credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml vpc.keyid: GKTADJGHEIQSXMKKRBJ08H vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure Apigateway API exists: boto_apigateway.present: - name: myfunction - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import os import re # Import Salt Libs import salt.utils.files import salt.utils.json import salt.utils.yaml # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_apigateway' if 'boto_apigateway.describe_apis' in __salt__ else False def present(name, api_name, swagger_file, stage_name, api_key_required, lambda_integration_role, lambda_region=None, stage_variables=None, region=None, key=None, keyid=None, profile=None, lambda_funcname_format='{stage}_{api}_{resource}_{method}', authorization_type='NONE', error_response_template=None, response_template=None): ''' Ensure the spcified api_name with the corresponding swaggerfile is deployed to the given stage_name in AWS ApiGateway. this state currently only supports ApiGateway integration with AWS Lambda, and CORS support is handled through a Mock integration. There may be multiple deployments for the API object, each deployment is tagged with a description (i.e. unique label) in pretty printed json format consisting of the following key/values. .. code-block:: text { "api_name": api_name, "swagger_file": basename_of_swagger_file "swagger_file_md5sum": md5sum_of_swagger_file, "swagger_info_object": info_object_content_in_swagger_file } Please note that the name of the lambda function to be integrated will be derived via the provided lambda_funcname_format parameters: - the default lambda_funcname_format is a string with the following substitutable keys: "{stage}_{api}_{resource}_{method}". The user can choose to reorder the known keys. - the stage key corresponds to the stage_name passed in. - the api key corresponds to the api_name passed in. - the resource corresponds to the resource path defined in the passed swagger file. - the method corresponds to the method for a resource path defined in the passed swagger file. For the default lambda_funcname_format, given the following input: .. 
code-block:: python api_name = ' Test Service' stage_name = 'alpha' basePath = '/api' path = '/a/{b}/c' method = 'POST' We will end up with the following Lambda Function Name that will be looked up: 'test_service_alpha_a_b_c_post' The canconicalization of these input parameters is done in the following order: 1. lambda_funcname_format is formatted with the input parameters as passed, 2. resulting string is stripped for leading/trailing spaces, 3. path parameter's curly braces are removed from the resource path, 4. consecutive spaces and forward slashes in the paths are replaced with '_' 5. consecutive '_' are replaced with '_' Please note that for error response handling, the swagger file must have an error response model with the following schema. The lambda functions should throw exceptions for any non successful responses. An optional pattern field can be specified in errorMessage field to aid the response mapping from Lambda to the proper error return status codes. .. code-block:: yaml Error: type: object properties: stackTrace: type: array items: type: array items: type: string description: call stack errorType: type: string description: error type errorMessage: type: string description: | Error message, will be matched based on pattern. If no pattern is specified, the default pattern used for response mapping will be +*. name The name of the state definition api_name The name of the rest api that we want to ensure exists in AWS API Gateway swagger_file Name of the location of the swagger rest api definition file in YAML format. stage_name Name of the stage we want to be associated with the given api_name and swagger_file definition api_key_required True or False - whether the API Key is required to call API methods lambda_integration_role The name or ARN of the IAM role that the AWS ApiGateway assumes when it executes your lambda function to handle incoming requests lambda_region The region where we expect to find the lambda functions. 
This is used to determine the region where we should look for the Lambda Function for integration purposes. The region determination is based on the following priority: 1. lambda_region as passed in (is not None) 2. if lambda_region is None, use the region as if a boto_lambda function were executed without explicitly specifying lambda region. 3. if region determined in (2) is different than the region used by boto_apigateway functions, a final lookup will be attempted using the boto_apigateway region. stage_variables A dict with variables and their values, or a pillar key (string) that contains a dict with variables and their values. key and values in the dict must be strings. {'string': 'string'} region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. lambda_funcname_format Please review the earlier example for the usage. The only substituable keys in the funcname format are {stage}, {api}, {resource}, {method}. Any other keys or positional subsitution parameters will be flagged as an invalid input. authorization_type This field can be either 'NONE', or 'AWS_IAM'. This will be applied to all methods in the given swagger spec file. Default is set to 'NONE' error_response_template String value that defines the response template mapping that should be applied in cases error occurs. Refer to AWS documentation for details: http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html If set to None, the following default value is used: .. 
code-block:: text '#set($inputRoot = $input.path(\'$\'))\\n' '{\\n' ' "errorMessage" : "$inputRoot.errorMessage",\\n' ' "errorType" : "$inputRoot.errorType",\\n' ' "stackTrace" : [\\n' '#foreach($stackTrace in $inputRoot.stackTrace)\\n' ' [\\n' '#foreach($elem in $stackTrace)\\n' ' "$elem"\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' '#if($foreach.hasNext),#end\\n' '#end\\n' ' ]\\n' .. versionadded:: 2017.7.0 response_template String value that defines the response template mapping applied in case of success (including OPTIONS method) If set to None, empty ({}) template is assumed, which will transfer response from the lambda function as is. .. versionadded:: 2017.7.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) # try to open the swagger file and basic validation swagger = _Swagger(api_name, stage_name, lambda_funcname_format, swagger_file, error_response_template, response_template, common_args) # retrieve stage variables stage_vars = _get_stage_variables(stage_variables) # verify if api and stage already exists ret = swagger.verify_api(ret) if ret.get('publish'): # there is a deployment label with signature matching the given api_name, # swagger file name, swagger file md5 sum, and swagger file info object # just reassociate the stage_name to the given deployment label. 
            # Test mode: report the pending stage reassociation without touching AWS.
            if __opts__['test']:
                ret['comment'] = ('[stage: {0}] will be reassociated to an already available '
                                  'deployment that matched the given [api_name: {1}] '
                                  'and [swagger_file: {2}].\n'
                                  'Stage variables will be set '
                                  'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
                ret['result'] = None
                return ret
            return swagger.publish_api(ret, stage_vars)

        if ret.get('current'):
            # already at desired state for the stage, swagger_file, and api_name
            if __opts__['test']:
                ret['comment'] = ('[stage: {0}] is already at desired state with an associated '
                                  'deployment matching the given [api_name: {1}] '
                                  'and [swagger_file: {2}].\n'
                                  'Stage variables will be set '
                                  'to {3}.'.format(stage_name, api_name, swagger_file, stage_vars))
                ret['result'] = None
            # Stage variables are still (re)applied even when the deployment is current.
            return swagger.overwrite_stage_variables(ret, stage_vars)

        # there doesn't exist any previous deployments for the given swagger_file, we need
        # to redeploy the content of the swagger file to the api, models, and resources object
        # and finally create a new deployment and tie the stage_name to this new deployment
        if __opts__['test']:
            ret['comment'] = ('There is no deployment matching the given [api_name: {0}] '
                              'and [swagger_file: {1}]. A new deployment will be '
                              'created and the [stage_name: {2}] will then be associated '
                              'to the newly created deployment.\n'
                              'Stage variables will be set '
                              'to {3}.'.format(api_name, swagger_file, stage_name, stage_vars))
            ret['result'] = None
            return ret

        # Full (re)deployment: rest api, then models, then resources/methods, and
        # finally a new deployment tied to the requested stage.  Each step may flag
        # 'abort' in ret, in which case we stop immediately.
        ret = swagger.deploy_api(ret)
        if ret.get('abort'):
            return ret

        ret = swagger.deploy_models(ret)
        if ret.get('abort'):
            return ret

        ret = swagger.deploy_resources(ret,
                                       api_key_required=api_key_required,
                                       lambda_integration_role=lambda_integration_role,
                                       lambda_region=lambda_region,
                                       authorization_type=authorization_type)
        if ret.get('abort'):
            return ret

        ret = swagger.publish_api(ret, stage_vars)

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret


def _get_stage_variables(stage_variables):
    '''
    Helper function to retrieve stage variables from pillars/options, if the
    input is a string
    '''
    ret = dict()
    if stage_variables is None:
        return ret

    if isinstance(stage_variables, six.string_types):
        # Resolve the key from opts, then pillar 'master', then pillar proper;
        # later matches win.
        if stage_variables in __opts__:
            ret = __opts__[stage_variables]
        master_opts = __pillar__.get('master', {})
        if stage_variables in master_opts:
            ret = master_opts[stage_variables]
        if stage_variables in __pillar__:
            ret = __pillar__[stage_variables]
    elif isinstance(stage_variables, dict):
        ret = stage_variables

    # Anything that did not resolve to a dict is silently discarded.
    if not isinstance(ret, dict):
        ret = dict()

    return ret


def absent(name, api_name, stage_name, nuke_api=False, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the stage_name associated with the given api_name deployed by boto_apigateway's
    present state is removed.  If the currently associated deployment to the given stage_name has
    no other stages associated with it, the deployment will also be removed.

    name
        Name of the swagger file in YAML format

    api_name
        Name of the rest api on AWS ApiGateway to ensure is absent.

    stage_name
        Name of the stage to be removed irrespective of the swagger file content.
        If the current deployment associated with the stage_name has no other stages associated
        with it, the deployment will also be removed.

    nuke_api
        If True, removes the API itself only if there are no other stages associated with any other
        deployments once the given stage_name is removed.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }

    try:
        common_args = dict([('region', region),
                            ('key', key),
                            ('keyid', keyid),
                            ('profile', profile)])

        # A bare _Swagger with no swagger file: only used here to resolve the api id.
        swagger = _Swagger(api_name, stage_name, '', None, None, None, common_args)

        if not swagger.restApiId:
            ret['comment'] = '[Rest API: {0}] does not exist.'.format(api_name)
            return ret

        if __opts__['test']:
            if nuke_api:
                # NOTE(review): the message below is missing a closing ']' after {1}
                ret['comment'] = ('[stage: {0}] will be deleted, if there are no other '
                                  'active stages, the [api: {1} will also be '
                                  'deleted.'.format(stage_name, api_name))
            else:
                ret['comment'] = ('[stage: {0}] will be deleted.'.format(stage_name))
            ret['result'] = None
            return ret

        ret = swagger.delete_stage(ret)

        if ret.get('abort'):
            return ret

        if nuke_api and swagger.no_more_deployments_remain():
            ret = swagger.delete_api(ret)

    except (ValueError, IOError) as e:
        ret['result'] = False
        ret['comment'] = '{0}'.format(e.args)

    return ret


# Helper Swagger Class for swagger version 2.0 API specification
def _gen_md5_filehash(fname, *args):
    '''
    helper function to generate a md5 hash of the swagger definition file
    any extra argument passed to the function is converted to a string
    and participates in the hash calculation
    '''
    _hash = hashlib.md5()
    # Read in 4 KiB chunks so large swagger files are not loaded into memory at once.
    with salt.utils.files.fopen(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            _hash.update(chunk)

    for extra_arg in args:
        _hash.update(six.b(str(extra_arg)))
    return _hash.hexdigest()


def _dict_to_json_pretty(d, sort_keys=True):
    '''
    helper function to generate pretty printed json output
    '''
    return salt.utils.json.dumps(d, indent=4, separators=(',', ': '), sort_keys=sort_keys)


# Heuristic on whether or not the property name loosely matches given set of 'interesting' factors
# If you are interested in IDs for example, 'id', 'blah_id', 'blahId' would all match
def _name_matches(name, matches):
    '''
    Helper function to see if given name has any of the patterns in given matches
    '''
    for m in matches:
        if name.endswith(m):
            return True
        if name.lower().endswith('_' + m.lower()):
            return True
        if name.lower() == m.lower():
            return True
    return False


def _object_reducer(o, names=('id', 'name', 'path', 'httpMethod', 'statusCode', 'Created', 'Deleted',
                              'Updated', 'Flushed', 'Associated', 'Disassociated')):
    '''
    Helper function to reduce the amount of information that will be kept in the change log
    for API GW related return values
    '''
    result = {}
    if isinstance(o, dict):
        for k, v in six.iteritems(o):
            if isinstance(v, dict):
                # 'variables' dicts are kept whole; other dicts are reduced recursively
                reduced = v if k == 'variables' else _object_reducer(v, names)
                if reduced or _name_matches(k, names):
                    result[k] = reduced
            elif isinstance(v, list):
                newlist = []
                for val in v:
                    reduced = _object_reducer(val, names)
                    if reduced or _name_matches(k, names):
                        newlist.append(reduced)
                if newlist:
                    result[k] = newlist
            else:
                if _name_matches(k, names):
                    result[k] = v
    return result


def _log_changes(ret, changekey, changevalue):
    '''
    For logging create/update/delete operations to AWS ApiGateway
    '''
    cl = ret['changes'].get('new', [])
    cl.append({changekey: _object_reducer(changevalue)})
    ret['changes']['new'] = cl
    return ret


def _log_error_and_abort(ret, obj):
    '''
    helper function to update errors in the return structure
    '''
    ret['result'] = False
    ret['abort'] = True
    if 'error' in obj:
        ret['comment'] = '{0}'.format(obj.get('error'))
    return ret


class _Swagger(object):
    '''
    this is a helper class that holds the swagger definition file and the associated logic
    related to how to interpret the file and apply it to AWS Api Gateway.

    The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources
    methods.
    '''

    SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces',
                             'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions',
                             'security', 'tags', 'externalDocs')
    # SWAGGER OBJECT V2 Fields that are required by boto apigateway states.
    SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions')
    # SWAGGER OPERATION NAMES
    SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch')
    SWAGGER_VERSIONS_SUPPORTED = ('2.0',)

    # VENDOR SPECIFIC FIELD PATTERNS
    VENDOR_EXT_PATTERN = re.compile('^x-')

    # JSON_SCHEMA_REF
    JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#'

    # AWS integration templates for normal and options methods
    REQUEST_TEMPLATE = {'application/json':
                        '#set($inputRoot = $input.path(\'$\'))\n'
                        '{\n'
                        '"header_params" : {\n'
                        '#set ($map = $input.params().header)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"query_params" : {\n'
                        '#set ($map = $input.params().querystring)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"path_params" : {\n'
                        '#set ($map = $input.params().path)\n'
                        '#foreach( $param in $map.entrySet() )\n'
                        '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n'
                        '#end\n'
                        '},\n'
                        '"apigw_context" : {\n'
                        '"apiId": "$context.apiId",\n'
                        '"httpMethod": "$context.httpMethod",\n'
                        '"requestId": "$context.requestId",\n'
                        '"resourceId": "$context.resourceId",\n'
                        '"resourcePath": "$context.resourcePath",\n'
                        '"stage": "$context.stage",\n'
                        '"identity": {\n'
                        ' "user":"$context.identity.user",\n'
                        ' "userArn":"$context.identity.userArn",\n'
                        ' "userAgent":"$context.identity.userAgent",\n'
                        ' "sourceIp":"$context.identity.sourceIp",\n'
                        ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n'
                        ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n'
                        ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n'
                        ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n'
                        ' "caller":"$context.identity.caller",\n'
                        ' "apiKey":"$context.identity.apiKey",\n'
                        ' "accountId":"$context.identity.accountId"\n'
                        '}\n'
                        '},\n'
                        '"body_params" : $input.json(\'$\'),\n'
                        '"stage_variables": {\n'
                        '#foreach($variable in $stageVariables.keySet())\n'
                        '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n'
                        '#if($foreach.hasNext), #end\n'
                        '#end\n'
                        '}\n'
                        '}'}
    REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'}

    # AWS integration response template mapping to convert stackTrace part or the error
    # to a uniform format containing strings only. Swagger does not seem to allow defining
    # an array of non-uniform types, to it is not possible to create error model to match
    # exactly what comes out of lambda functions in case of error.
    RESPONSE_TEMPLATE = {'application/json':
                         '#set($inputRoot = $input.path(\'$\'))\n'
                         '{\n'
                         ' "errorMessage" : "$inputRoot.errorMessage",\n'
                         ' "errorType" : "$inputRoot.errorType",\n'
                         ' "stackTrace" : [\n'
                         '#foreach($stackTrace in $inputRoot.stackTrace)\n'
                         ' [\n'
                         '#foreach($elem in $stackTrace)\n'
                         ' "$elem"\n'
                         '#if($foreach.hasNext),#end\n'
                         '#end\n'
                         ' ]\n'
                         '#if($foreach.hasNext),#end\n'
                         '#end\n'
                         ' ]\n'
                         '}'}
    RESPONSE_OPTION_TEMPLATE = {}

    # This string should not be modified, every API created by this state will carry the description
    # below.
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret def usage_plan_present(name, plan_name, description=None, throttle=None, quota=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the spcifieda usage plan with the corresponding metrics is deployed .. versionadded:: 2017.7.0 name name of the state plan_name [Required] name of the usage plan throttle [Optional] throttling parameters expressed as a dictionary. If provided, at least one of the throttling parameters must be present rateLimit rate per second at which capacity bucket is populated burstLimit maximum rate allowed quota [Optional] quota on the number of api calls permitted by the plan. If provided, limit and period must be present limit [Required] number of calls permitted per quota period offset [Optional] number of calls to be subtracted from the limit at the beginning of the period period [Required] period to which quota applies. Must be DAY, WEEK or MONTH .. 
code-block:: yaml UsagePlanPresent: boto_apigateway.usage_plan_present: - plan_name: my_usage_plan - throttle: rateLimit: 70 burstLimit: 100 - quota: limit: 1000 offset: 0 period: DAY - profile: my_profile ''' func_params = locals() ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: # plan does not exist, we need to create it if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be created'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.create_usage_plan'](name=plan_name, description=description, throttle=throttle, quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to create a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': None} ret['comment'] = 'A new usage plan {0} has been created'.format(plan_name) else: # need an existing plan modified to match given value plan = existing['plans'][0] needs_updating = False modifiable_params = (('throttle', ('rateLimit', 'burstLimit')), ('quota', ('limit', 'offset', 'period'))) for p, fields in modifiable_params: for f in fields: actual_param = {} if func_params.get(p) is None else func_params.get(p) if plan.get(p, {}).get(f, None) != actual_param.get(f, None): needs_updating = True break if not needs_updating: ret['comment'] = 'usage plan {0} is already in a correct state'.format(plan_name) ret['result'] = True return ret if __opts__['test']: ret['comment'] = 'a new usage plan {0} would be updated'.format(plan_name) ret['result'] = None return ret result = __salt__['boto_apigateway.update_usage_plan'](plan['id'], throttle=throttle, 
quota=quota, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to update a usage plan {0}, {1}'.format(plan_name, result['error']) return ret ret['changes']['old'] = {'plan': plan} ret['comment'] = 'usage plan {0} has been updated'.format(plan_name) newstate = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans after updates' return ret ret['changes']['new'] = {'plan': newstate['plans'][0]} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_absent(name, plan_name, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is no longer present .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to remove .. code-block:: yaml usage plan absent: boto_apigateway.usage_plan_absent: - plan_name: my_usage_plan - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist already'.format(plan_name) return ret if __opts__['test']: ret['comment'] = 'Usage plan {0} exists and would be deleted'.format(plan_name) ret['result'] = None return ret plan_id = existing['plans'][0]['id'] result = __salt__['boto_apigateway.delete_usage_plan'](plan_id, **common_args) if 'error' in result: ret['result'] = False ret['comment'] = 'Failed to delete usage plan {0}, {1}'.format(plan_name, result) return ret ret['comment'] = 'Usage plan {0} has been deleted'.format(plan_name) ret['changes']['old'] = 
{'plan': existing['plans'][0]} ret['changes']['new'] = {'plan': None} except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret def usage_plan_association_absent(name, plan_name, api_stages, region=None, key=None, keyid=None, profile=None): ''' Ensures usage plan identified by name is removed from provided api_stages If a plan is associated to stages not listed in api_stages parameter, those associations remain intact. .. versionadded:: 2017.7.0 name name of the state plan_name name of the plan to use api_stages list of dictionaries, where each dictionary consists of the following keys: apiId apiId of the api to detach usage plan from stage stage name of the api to detach usage plan from .. code-block:: yaml UsagePlanAssociationAbsent: boto_apigateway.usage_plan_association_absent: - plan_name: my_plan - api_stages: - apiId: 9kb0404ec0 stage: my_stage - apiId: l9v7o2aj90 stage: my_stage - profile: my_profile ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } try: common_args = dict([('region', region), ('key', key), ('keyid', keyid), ('profile', profile)]) existing = __salt__['boto_apigateway.describe_usage_plans'](name=plan_name, **common_args) if 'error' in existing: ret['result'] = False ret['comment'] = 'Failed to describe existing usage plans' return ret if not existing['plans']: ret['comment'] = 'Usage plan {0} does not exist'.format(plan_name) ret['result'] = False return ret if len(existing['plans']) != 1: ret['comment'] = 'There are multiple usage plans with the same name - it is not supported' ret['result'] = False return ret plan = existing['plans'][0] plan_id = plan['id'] plan_stages = plan.get('apiStages', []) if not plan_stages: ret['comment'] = 'Usage plan {0} has no associated stages already'.format(plan_name) return ret stages_to_remove = [] for api in api_stages: if api in plan_stages: stages_to_remove.append(api) if not stages_to_remove: ret['comment'] = 'Usage plan is already 
not asssociated to any api stages' return ret result = __salt__['boto_apigateway.detach_usage_plan_from_apis'](plan_id, stages_to_remove, **common_args) if 'error' in result: ret['comment'] = 'Failed to disassociate a usage plan {0} from the apis {1}, {2}'.format(plan_name, stages_to_remove, result['error']) ret['result'] = False return ret ret['comment'] = 'successfully disassociated usage plan from apis' ret['changes']['old'] = plan_stages ret['changes']['new'] = result.get('result', {}).get('apiStages', []) except (ValueError, IOError) as e: ret['result'] = False ret['comment'] = '{0}'.format(e.args) return ret
saltstack/salt
salt/states/boto_apigateway.py
_Swagger._validate_error_response_model
python
def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname))
Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L754-L796
null
class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' 
"userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret
saltstack/salt
salt/states/boto_apigateway.py
_Swagger._validate_lambda_funcname_format
python
def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format))
Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L798-L813
null
class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' 
"userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. 
''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. ''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. 
''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def 
no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. 
''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = 
__salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. 
if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret
saltstack/salt
salt/states/boto_apigateway.py
_Swagger._validate_swagger_file
python
def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models())
High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L815-L844
null
class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' 
"userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. ''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model)) @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. 
Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False 
break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. ''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = 
__salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. 
If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. if not self._one_or_more_stages_remain(deploymentId): result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error')) else: ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name) else: # no matching stage_name/deployment found ret['comment'] = 'stage {0} does not exist'.format(self._stage_name) return ret def verify_api(self, ret): ''' this method helps determine if the given stage_name is already on a deployment label matching the input api_name, swagger_file. If yes, returns abort with comment indicating already at desired state. If not and there is previous deployment labels in AWS matching the given input api_name and swagger file, indicate to the caller that we only need to reassociate stage_name to the previously existing deployment label. 
''' if self.restApiId: deployed_label_json = self._get_current_deployment_label() if deployed_label_json == self.deployment_label_json: ret['comment'] = ('Already at desired state, the stage {0} is already at the desired ' 'deployment label:\n{1}'.format(self._stage_name, deployed_label_json)) ret['current'] = True return ret else: self._deploymentId = self._get_desired_deployment_id() if self._deploymentId: ret['publish'] = True return ret def publish_api(self, ret, stage_variables): ''' this method tie the given stage_name to a deployment matching the given swagger_file ''' stage_desc = dict() stage_desc['current_deployment_label'] = self.deployment_label stage_desc_json = _dict_to_json_pretty(stage_desc) if self._deploymentId: # just do a reassociate of stage_name to an already existing deployment res = self._set_current_deployment(stage_desc_json, stage_variables) if not res.get('set'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response')) else: # no deployment existed for the given swagger_file for this Swagger object res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args) if not res.get('created'): ret['abort'] = True ret['result'] = False ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment')) return ret def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = 
__salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True} def deploy_api(self, ret): ''' this method create the top level rest api in AWS apigateway ''' if self.restApiId: res = self._cleanup_api() if not res.get('deleted'): ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId) ret['abort'] = True ret['result'] = False return ret return ret response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in response: ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message']) return ret self.restApiId = response.get('restapi', {}).get('id') return _log_changes(ret, 'deploy_api', response.get('restapi')) def delete_api(self, ret): ''' Method to delete a Rest Api named defined in the swagger file's Info Object's title value. 
ret a dictionary for returning status to Saltstack ''' exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if exists_response.get('exists'): if __opts__['test']: ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name) ret['result'] = None ret['abort'] = True return ret delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args) if not delete_api_response.get('deleted'): ret['result'] = False ret['abort'] = True if 'error' in delete_api_response: ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message']) return ret ret = _log_changes(ret, 'delete_api', delete_api_response) else: ret['comment'] = ('api already absent for swagger file: ' '{0}, desc: {1}'.format(self.rest_api_name, self.info_json)) return ret def _aws_model_ref_from_swagger_ref(self, r): ''' Helper function to reference models created on aws apigw ''' model_name = r.split('/')[-1] return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name) def _update_schema_to_aws_notation(self, schema): ''' Helper function to map model schema to aws notation ''' result = {} for k, v in schema.items(): if k == '$ref': v = self._aws_model_ref_from_swagger_ref(v) if isinstance(v, dict): v = self._update_schema_to_aws_notation(v) result[k] = v return result def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. 
''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list)) def _build_all_dependencies(self): ''' Helper function to build a map of model to their list of model reference dependencies ''' ret = {} for model, schema in six.iteritems(self._models()): dep_list = self._build_dependent_model_list(schema) ret[model] = dep_list return ret def _get_model_without_dependencies(self, models_dict): ''' Helper function to find the next model that should be created ''' next_model = None if not models_dict: return next_model for model, dependencies in six.iteritems(models_dict): if dependencies == []: next_model = model break if next_model is None: raise ValueError('incomplete model definitions, models in dependency ' 'list not defined: {0}'.format(models_dict)) # remove the model from other depednencies before returning models_dict.pop(next_model) for model, dep_list in six.iteritems(models_dict): if next_model in dep_list: dep_list.remove(next_model) return next_model def deploy_models(self, ret): ''' Method to deploy swagger file's definition objects and associated schema to AWS Apigateway as Models ret a dictionary for returning status to Saltstack ''' for model, schema in self.models(): # add in a few attributes into the model schema that AWS expects # _schema = schema.copy() _schema = self._update_schema_to_aws_notation(schema) 
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)}) # check to see if model already exists, aws has 2 default models [Empty, Error] # which may need upate with data from swagger file model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args) if model_exists_response.get('exists'): update_model_schema_response = ( __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)) if not update_model_schema_response.get('updated'): ret['result'] = False ret['abort'] = True if 'error' in update_model_schema_response: ret['comment'] = ('Failed to update existing model {0} with schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', update_model_schema_response) else: create_model_response = ( __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)) if not create_model_response.get('created'): ret['result'] = False ret['abort'] = True if 'error' in create_model_response: ret['comment'] = ('Failed to create model {0}, schema {1}, ' 'error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])) return ret ret = _log_changes(ret, 'deploy_models', create_model_response) return ret def _lambda_name(self, resourcePath, httpMethod): ''' Helper method to construct lambda name based on the rule specified in doc string of boto_apigateway.api_present function ''' lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod) lambda_name = lambda_name.strip() lambda_name = re.sub(r'{|}', '', 
lambda_name) lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower() return re.sub(r'_+', '_', lambda_name) def _lambda_uri(self, lambda_name, lambda_region): ''' Helper Method to construct the lambda uri for use in method integration ''' profile = self._common_aws_args.get('profile') region = self._common_aws_args.get('region') lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile) apigw_region = __utils__['boto3.get_region']('apigateway', region, profile) lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if lambda_region != apigw_region: if not lambda_desc.get('function'): # try look up in the same region as the apigateway as well if previous lookup failed lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args) if not lambda_desc.get('function'): raise ValueError('Could not find lambda function {0} in ' 'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region)) lambda_arn = lambda_desc.get('function').get('FunctionArn') lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31' '/functions/{1}/invocations'.format(apigw_region, lambda_arn)) return lambda_uri def _parse_method_data(self, method_name, method_data): ''' Helper function to construct the method request params, models, request_templates and integration_type values needed to configure method request integration/mappings. 
''' method_params = {} method_models = {} if 'parameters' in method_data: for param in method_data['parameters']: p = _Swagger.SwaggerParameter(param) if p.name: method_params[p.name] = True if p.schema: method_models['application/json'] = p.schema request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE integration_type = "MOCK" if method_name == 'options' else "AWS" return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type} def _find_patterns(self, o): result = [] if isinstance(o, dict): for k, v in six.iteritems(o): if isinstance(v, dict): result.extend(self._find_patterns(v)) else: if k == 'pattern': result.append(v) return result def _get_pattern_for_schema(self, schema_name, httpStatus): ''' returns the pattern specified in a response schema ''' defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*' model = self._models().get(schema_name) patterns = self._find_patterns(model) return patterns[0] if patterns else defaultPattern def _get_response_template(self, method_name, http_status): if method_name == 'options' or not self._is_http_error_rescode(http_status): response_templates = {'application/json': self._response_template} \ if self._response_template else self.RESPONSE_OPTION_TEMPLATE else: response_templates = {'application/json': self._error_response_template} \ if self._error_response_template else self.RESPONSE_TEMPLATE return response_templates def _parse_method_response(self, method_name, method_response, httpStatus): ''' Helper function to construct the method response params, models, and integration_params values needed to configure method response integration/mappings. 
''' method_response_models = {} method_response_pattern = '.*' if method_response.schema: method_response_models['application/json'] = method_response.schema method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus) method_response_params = {} method_integration_response_params = {} for header in method_response.headers: response_header = 'method.response.header.{0}'.format(header) method_response_params[response_header] = False header_data = method_response.headers.get(header) method_integration_response_params[response_header] = ( "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'") response_templates = self._get_response_template(method_name, httpStatus) return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates} def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to create a method for the given resource path, along with its associated request and response integrations. ret a dictionary for returning status to Saltstack resource_path the full resource path where the named method_name will be associated with. method_name a string that is one of the following values: 'delete', 'get', 'head', 'options', 'patch', 'post', 'put' method_data the value dictionary for this method in the swagger definition file. api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. 
authorization_type 'NONE' or 'AWS_IAM' ''' method = self._parse_method_data(method_name.lower(), method_data) # for options method to enable CORS, api_key_required will be set to False always. # authorization_type will be set to 'NONE' always. if method_name.lower() == 'options': api_key_required = False authorization_type = 'NONE' m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args) if not m.get('created'): ret = _log_error_and_abort(ret, m) return ret ret = _log_changes(ret, '_deploy_method.create_api_method', m) lambda_uri = "" if method_name.lower() != 'options': lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region) # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions # about the intent of the call. 
HTTP method will be passed to lambda as part of the API gateway context integration = ( __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)) if not integration.get('created'): ret = _log_error_and_abort(ret, integration) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration', integration) if 'responses' in method_data: for response, response_data in six.iteritems(method_data['responses']): httpStatus = str(response) # future lint: disable=blacklisted-function method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus) mr = __salt__['boto_apigateway.create_api_method_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args) if not mr.get('created'): ret = _log_error_and_abort(ret, mr) return ret ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr) mir = __salt__['boto_apigateway.create_api_integration_response']( restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args) if not mir.get('created'): ret = _log_error_and_abort(ret, mir) return ret ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir) else: raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name)) return ret def deploy_resources(self, 
ret, api_key_required, lambda_integration_role, lambda_region, authorization_type): ''' Method to deploy resources defined in the swagger file. ret a dictionary for returning status to Saltstack api_key_required True or False, whether api key is required to access this method. lambda_integration_role name of the IAM role or IAM role arn that Api Gateway will assume when executing the associated lambda function lambda_region the region for the lambda function that Api Gateway will integrate to. authorization_type 'NONE' or 'AWS_IAM' ''' for path, pathData in self.paths: resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args) if not resource.get('created'): ret = _log_error_and_abort(ret, resource) return ret ret = _log_changes(ret, 'deploy_resources', resource) for method, method_data in six.iteritems(pathData): if method in _Swagger.SWAGGER_OPERATION_NAMES: ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type) return ret
saltstack/salt
salt/states/boto_apigateway.py
_Swagger.models
python
def models(self): ''' generator to return the tuple of model and its schema to create on aws. ''' model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model))
generator to return the tuple of model and its schema to create on aws.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_apigateway.py#L898-L907
[ "def _models(self):\n '''\n returns an iterator for the models specified in the swagger file\n '''\n models = self._cfg.get('definitions')\n if not models:\n raise ValueError('Definitions Object has no values, You need to define them in your swagger file')\n\n return models\n", "def _build_all_dependencies(self):\n '''\n Helper function to build a map of model to their list of model reference dependencies\n '''\n ret = {}\n for model, schema in six.iteritems(self._models()):\n dep_list = self._build_dependent_model_list(schema)\n ret[model] = dep_list\n return ret\n", "def _get_model_without_dependencies(self, models_dict):\n '''\n Helper function to find the next model that should be created\n '''\n next_model = None\n if not models_dict:\n return next_model\n\n for model, dependencies in six.iteritems(models_dict):\n if dependencies == []:\n next_model = model\n break\n\n if next_model is None:\n raise ValueError('incomplete model definitions, models in dependency '\n 'list not defined: {0}'.format(models_dict))\n\n # remove the model from other depednencies before returning\n models_dict.pop(next_model)\n for model, dep_list in six.iteritems(models_dict):\n if next_model in dep_list:\n dep_list.remove(next_model)\n\n return next_model\n" ]
class _Swagger(object): ''' this is a helper class that holds the swagger definition file and the associated logic related to how to interpret the file and apply it to AWS Api Gateway. The main interface to the outside world is in deploy_api, deploy_models, and deploy_resources methods. ''' SWAGGER_OBJ_V2_FIELDS = ('swagger', 'info', 'host', 'basePath', 'schemes', 'consumes', 'produces', 'paths', 'definitions', 'parameters', 'responses', 'securityDefinitions', 'security', 'tags', 'externalDocs') # SWAGGER OBJECT V2 Fields that are required by boto apigateway states. SWAGGER_OBJ_V2_FIELDS_REQUIRED = ('swagger', 'info', 'basePath', 'schemes', 'paths', 'definitions') # SWAGGER OPERATION NAMES SWAGGER_OPERATION_NAMES = ('get', 'put', 'post', 'delete', 'options', 'head', 'patch') SWAGGER_VERSIONS_SUPPORTED = ('2.0',) # VENDOR SPECIFIC FIELD PATTERNS VENDOR_EXT_PATTERN = re.compile('^x-') # JSON_SCHEMA_REF JSON_SCHEMA_DRAFT_4 = 'http://json-schema.org/draft-04/schema#' # AWS integration templates for normal and options methods REQUEST_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' '"header_params" : {\n' '#set ($map = $input.params().header)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"query_params" : {\n' '#set ($map = $input.params().querystring)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"path_params" : {\n' '#set ($map = $input.params().path)\n' '#foreach( $param in $map.entrySet() )\n' '"$param.key" : "$param.value" #if( $foreach.hasNext ), #end\n' '#end\n' '},\n' '"apigw_context" : {\n' '"apiId": "$context.apiId",\n' '"httpMethod": "$context.httpMethod",\n' '"requestId": "$context.requestId",\n' '"resourceId": "$context.resourceId",\n' '"resourcePath": "$context.resourcePath",\n' '"stage": "$context.stage",\n' '"identity": {\n' ' "user":"$context.identity.user",\n' ' 
"userArn":"$context.identity.userArn",\n' ' "userAgent":"$context.identity.userAgent",\n' ' "sourceIp":"$context.identity.sourceIp",\n' ' "cognitoIdentityId":"$context.identity.cognitoIdentityId",\n' ' "cognitoIdentityPoolId":"$context.identity.cognitoIdentityPoolId",\n' ' "cognitoAuthenticationType":"$context.identity.cognitoAuthenticationType",\n' ' "cognitoAuthenticationProvider":["$util.escapeJavaScript($context.identity.cognitoAuthenticationProvider)"],\n' ' "caller":"$context.identity.caller",\n' ' "apiKey":"$context.identity.apiKey",\n' ' "accountId":"$context.identity.accountId"\n' '}\n' '},\n' '"body_params" : $input.json(\'$\'),\n' '"stage_variables": {\n' '#foreach($variable in $stageVariables.keySet())\n' '"$variable": "$util.escapeJavaScript($stageVariables.get($variable))"\n' '#if($foreach.hasNext), #end\n' '#end\n' '}\n' '}'} REQUEST_OPTION_TEMPLATE = {'application/json': '{"statusCode": 200}'} # AWS integration response template mapping to convert stackTrace part or the error # to a uniform format containing strings only. Swagger does not seem to allow defining # an array of non-uniform types, to it is not possible to create error model to match # exactly what comes out of lambda functions in case of error. RESPONSE_TEMPLATE = {'application/json': '#set($inputRoot = $input.path(\'$\'))\n' '{\n' ' "errorMessage" : "$inputRoot.errorMessage",\n' ' "errorType" : "$inputRoot.errorType",\n' ' "stackTrace" : [\n' '#foreach($stackTrace in $inputRoot.stackTrace)\n' ' [\n' '#foreach($elem in $stackTrace)\n' ' "$elem"\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '#if($foreach.hasNext),#end\n' '#end\n' ' ]\n' '}'} RESPONSE_OPTION_TEMPLATE = {} # This string should not be modified, every API created by this state will carry the description # below. 
AWS_API_DESCRIPTION = _dict_to_json_pretty({"provisioned_by": "Salt boto_apigateway.present State", "context": "See deployment or stage description"}) class SwaggerParameter(object): ''' This is a helper class for the Swagger Parameter Object ''' LOCATIONS = ('body', 'query', 'header', 'path') def __init__(self, paramdict): self._paramdict = paramdict @property def location(self): ''' returns location in the swagger parameter object ''' _location = self._paramdict.get('in') if _location in _Swagger.SwaggerParameter.LOCATIONS: return _location raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location)) @property def name(self): ''' returns parameter name in the swagger parameter object ''' _name = self._paramdict.get('name') if _name: if self.location == 'header': return 'method.request.header.{0}'.format(_name) elif self.location == 'query': return 'method.request.querystring.{0}'.format(_name) elif self.location == 'path': return 'method.request.path.{0}'.format(_name) return None raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict))) @property def schema(self): ''' returns the name of the schema given the reference in the swagger parameter object ''' if self.location == 'body': _schema = self._paramdict.get('schema') if _schema: if '$ref' in _schema: schema_name = _schema.get('$ref').split('/')[-1] return schema_name raise ValueError(('Body parameter must have a JSON reference ' 'to the schema definition due to Amazon API restrictions: {0}'.format(self.name))) raise ValueError('Body parameter must have a schema: {0}'.format(self.name)) return None class SwaggerMethodResponse(object): ''' Helper class for Swagger Method Response Object ''' def __init__(self, r): self._r = r @property def schema(self): ''' returns the name of the schema given the reference in the swagger method response object ''' _schema = self._r.get('schema') if _schema: if '$ref' in _schema: return 
_schema.get('$ref').split('/')[-1] raise ValueError(('Method response must have a JSON reference ' 'to the schema definition: {0}'.format(_schema))) return None @property def headers(self): ''' returns the headers dictionary in the method response object ''' _headers = self._r.get('headers', {}) return _headers def __init__(self, api_name, stage_name, lambda_funcname_format, swagger_file_path, error_response_template, response_template, common_aws_args): self._api_name = api_name self._stage_name = stage_name self._lambda_funcname_format = lambda_funcname_format self._common_aws_args = common_aws_args self._restApiId = '' self._deploymentId = '' self._error_response_template = error_response_template self._response_template = response_template if swagger_file_path is not None: if os.path.exists(swagger_file_path) and os.path.isfile(swagger_file_path): self._swagger_file = swagger_file_path self._md5_filehash = _gen_md5_filehash(self._swagger_file, error_response_template, response_template) with salt.utils.files.fopen(self._swagger_file, 'rb') as sf: self._cfg = salt.utils.yaml.safe_load(sf) self._swagger_version = '' else: raise IOError('Invalid swagger file path, {0}'.format(swagger_file_path)) self._validate_swagger_file() self._validate_lambda_funcname_format() self._resolve_api_id() def _is_http_error_rescode(self, code): ''' Helper function to determine if the passed code is in the 400~599 range of http error codes ''' return bool(re.match(r'^\s*[45]\d\d\s*$', code)) def _validate_error_response_model(self, paths, mods): ''' Helper function to help validate the convention established in the swagger file on how to handle response code mapping/integration ''' for path, ops in paths: for opname, opobj in six.iteritems(ops): if opname not in _Swagger.SWAGGER_OPERATION_NAMES: continue if 'responses' not in opobj: raise ValueError('missing mandatory responses field in path item object') for rescode, resobj in six.iteritems(opobj.get('responses')): if not 
self._is_http_error_rescode(str(rescode)): # future lint: disable=blacklisted-function continue # only check for response code from 400-599 if 'schema' not in resobj: raise ValueError('missing schema field in path {0}, ' 'op {1}, response {2}'.format(path, opname, rescode)) schemaobj = resobj.get('schema') if '$ref' not in schemaobj: raise ValueError('missing $ref field under schema in ' 'path {0}, op {1}, response {2}'.format(path, opname, rescode)) schemaobjref = schemaobj.get('$ref', '/') modelname = schemaobjref.split('/')[-1] if modelname not in mods: raise ValueError('model schema {0} reference not found ' 'under /definitions'.format(schemaobjref)) model = mods.get(modelname) if model.get('type') != 'object': raise ValueError('model schema {0} must be type object'.format(modelname)) if 'properties' not in model: raise ValueError('model schema {0} must have properties fields'.format(modelname)) modelprops = model.get('properties') if 'errorMessage' not in modelprops: raise ValueError('model schema {0} must have errorMessage as a property to ' 'match AWS convention. If pattern is not set, .+ will ' 'be used'.format(modelname)) def _validate_lambda_funcname_format(self): ''' Checks if the lambda function name format contains only known elements :return: True on success, ValueError raised on error ''' try: if self._lambda_funcname_format: known_kwargs = dict(stage='', api='', resource='', method='') self._lambda_funcname_format.format(**known_kwargs) return True except Exception: raise ValueError('Invalid lambda_funcname_format {0}. 
Please review ' 'documentation for known substitutable keys'.format(self._lambda_funcname_format)) def _validate_swagger_file(self): ''' High level check/validation of the input swagger file based on https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md This is not a full schema compliance check, but rather make sure that the input file (YAML or JSON) can be read into a dictionary, and we check for the content of the Swagger Object for version and info. ''' # check for any invalid fields for Swagger Object V2 for field in self._cfg: if (field not in _Swagger.SWAGGER_OBJ_V2_FIELDS and not _Swagger.VENDOR_EXT_PATTERN.match(field)): raise ValueError('Invalid Swagger Object Field: {0}'.format(field)) # check for Required Swagger fields by Saltstack boto apigateway state for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED: if field not in self._cfg: raise ValueError('Missing Swagger Object Field: {0}'.format(field)) # check for Swagger Version self._swagger_version = self._cfg.get('swagger') if self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED: raise ValueError('Unsupported Swagger version: {0},' 'Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED)) log.info(type(self._models)) self._validate_error_response_model(self.paths, self._models()) @property def md5_filehash(self): ''' returns md5 hash for the swagger file ''' return self._md5_filehash @property def info(self): ''' returns the swagger info object as a dictionary ''' info = self._cfg.get('info') if not info: raise ValueError('Info Object has no values') return info @property def info_json(self): ''' returns the swagger info object as a pretty printed json string. 
''' return _dict_to_json_pretty(self.info) @property def rest_api_name(self): ''' returns the name of the api ''' return self._api_name @property def rest_api_version(self): ''' returns the version field in the swagger info object ''' version = self.info.get('version') if not version: raise ValueError('Missing version value in Info Object') return version def _models(self): ''' returns an iterator for the models specified in the swagger file ''' models = self._cfg.get('definitions') if not models: raise ValueError('Definitions Object has no values, You need to define them in your swagger file') return models @property def paths(self): ''' returns an iterator for the relative resource paths specified in the swagger file ''' paths = self._cfg.get('paths') if not paths: raise ValueError('Paths Object has no values, You need to define them in your swagger file') for path in paths: if not path.startswith('/'): raise ValueError('Path object {0} should start with /. Please fix it'.format(path)) return six.iteritems(paths) @property def basePath(self): ''' returns the base path field as defined in the swagger file ''' basePath = self._cfg.get('basePath', '') return basePath @property def restApiId(self): ''' returns the rest api id as returned by AWS on creation of the rest api ''' return self._restApiId @restApiId.setter def restApiId(self, restApiId): ''' allows the assignment of the rest api id on creation of the rest api ''' self._restApiId = restApiId @property def deployment_label_json(self): ''' this property returns the unique description in pretty printed json for a particular api deployment ''' return _dict_to_json_pretty(self.deployment_label) @property def deployment_label(self): ''' this property returns the deployment label dictionary (mainly used by stage description) ''' label = dict() label['swagger_info_object'] = self.info label['api_name'] = self.rest_api_name label['swagger_file'] = os.path.basename(self._swagger_file) label['swagger_file_md5sum'] = 
self.md5_filehash return label # methods to interact with boto_apigateway execution modules def _one_or_more_stages_remain(self, deploymentId): ''' Helper function to find whether there are other stages still associated with a deployment ''' stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') return bool(stages) def no_more_deployments_remain(self): ''' Helper function to find whether there are deployments left with stages associated ''' no_more_deployments = True deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: deploymentId = deployment.get('id') stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages') if stages: no_more_deployments = False break return no_more_deployments def _get_current_deployment_id(self): ''' Helper method to find the deployment id that the stage name is currently assocaited with. ''' deploymentId = '' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if stage: deploymentId = stage.get('deploymentId') return deploymentId def _get_current_deployment_label(self): ''' Helper method to find the deployment label that the stage_name is currently associated with. 
''' deploymentId = self._get_current_deployment_id() deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment') if deployment: return deployment.get('description') return None def _get_desired_deployment_id(self): ''' Helper method to return the deployment id matching the desired deployment label for this Swagger object based on the given api_name, swagger_file ''' deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments') if deployments: for deployment in deployments: if deployment.get('description') == self.deployment_label_json: return deployment.get('id') return '' def overwrite_stage_variables(self, ret, stage_variables): ''' overwrite the given stage_name's stage variables with the given stage_variables ''' res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not res.get('overwrite'): ret['result'] = False ret['abort'] = True ret['comment'] = res.get('error') else: ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage')) return ret def _set_current_deployment(self, stage_desc_json, stage_variables): ''' Helper method to associate the stage_name to the given deploymentId and make this current ''' stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage') if not stage: stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args) if not stage.get('stage'): return {'set': False, 'error': stage.get('error')} else: # overwrite the stage variables overwrite = 
__salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args) if not overwrite.get('stage'): return {'set': False, 'error': overwrite.get('error')} return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args) def _resolve_api_id(self): ''' returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION as the api description ''' apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi') if apis: if len(apis) == 1: self.restApiId = apis[0].get('id') else: raise ValueError('Multiple APIs matching given name {0} and ' 'description {1}'.format(self.rest_api_name, self.info_json)) def delete_stage(self, ret): ''' Method to delete the given stage_name. If the current deployment tied to the given stage_name has no other stages associated with it, the deployment will be removed as well ''' deploymentId = self._get_current_deployment_id() if deploymentId: result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args) if not result.get('deleted'): ret['abort'] = True ret['result'] = False ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error')) else: # check if it is safe to delete the deployment as well. 
                # (continuation of delete_stage(); the method's opening lines are outside this view)
                # NOTE(review): indentation of this fragment is reconstructed — confirm against the
                # preceding, unseen portion of delete_stage before applying.
                if not self._one_or_more_stages_remain(deploymentId):
                    # the deployment is no longer referenced by any stage, so remove it too
                    result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId,
                                                                               deploymentId=deploymentId,
                                                                               **self._common_aws_args)
                    if not result.get('deleted'):
                        ret['abort'] = True
                        ret['result'] = False
                        ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
                else:
                    ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
        else:
            # no matching stage_name/deployment found
            ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)

        return ret

    def verify_api(self, ret):
        '''
        this method helps determine if the given stage_name is already on a deployment
        label matching the input api_name, swagger_file.

        If yes, returns abort with comment indicating already at desired state.
        If not and there is previous deployment labels in AWS matching the given input api_name and
        swagger file, indicate to the caller that we only need to reassociate stage_name to the
        previously existing deployment label.

        ret
            a dictionary for returning status to Saltstack; on a match ret['current'] is set,
            otherwise ret['publish'] is set when an existing deployment can be reused.
        '''
        if self.restApiId:
            deployed_label_json = self._get_current_deployment_label()
            if deployed_label_json == self.deployment_label_json:
                # stage already points at a deployment with an identical label: nothing to do
                ret['comment'] = ('Already at desired state, the stage {0} is already at the desired '
                                  'deployment label:\n{1}'.format(self._stage_name, deployed_label_json))
                ret['current'] = True
                return ret
            else:
                # a deployment for this swagger content may already exist; if so, the caller
                # only needs to re-point the stage instead of creating a new deployment
                self._deploymentId = self._get_desired_deployment_id()
                if self._deploymentId:
                    # just needs to associate stage_name to the found deployment
                    ret['publish'] = True
        return ret

    def publish_api(self, ret, stage_variables):
        '''
        this method tie the given stage_name to a deployment matching the given swagger_file

        ret
            a dictionary for returning status to Saltstack

        stage_variables
            a dictionary of variables to set on the stage
        '''
        stage_desc = dict()
        stage_desc['current_deployment_label'] = self.deployment_label
        stage_desc_json = _dict_to_json_pretty(stage_desc)

        if self._deploymentId:
            # just do a reassociate of stage_name to an already existing deployment
            res = self._set_current_deployment(stage_desc_json, stage_variables)
            if not res.get('set'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret,
                                   'publish_api (reassociate deployment, set stage_variables)',
                                   res.get('response'))
        else:
            # no deployment existed for the given swagger_file for this Swagger object
            res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId,
                                                                    stageName=self._stage_name,
                                                                    stageDescription=stage_desc_json,
                                                                    description=self.deployment_label_json,
                                                                    variables=stage_variables,
                                                                    **self._common_aws_args)
            if not res.get('created'):
                ret['abort'] = True
                ret['result'] = False
                ret['comment'] = res.get('error')
            else:
                ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
        return ret

    def _cleanup_api(self):
        '''
        Helper method to clean up resources and models if we detected a change in the swagger file
        for a stage.

        Returns the first failing delete result, or {'deleted': True} once all resources
        (except the root resource) and all models have been removed.
        '''
        resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
                                                                       **self._common_aws_args)
        if resources.get('resources'):
            # skip the first (root '/') resource and delete children deepest-first,
            # since AWS requires child resources to be gone before their parents
            res = resources.get('resources')[1:]
            res.reverse()
            for resource in res:
                delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId,
                                                                          path=resource.get('path'),
                                                                          **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId,
                                                                 **self._common_aws_args)
        if models.get('models'):
            for model in models.get('models'):
                delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId,
                                                                      modelName=model.get('name'),
                                                                      **self._common_aws_args)
                if not delres.get('deleted'):
                    return delres

        return {'deleted': True}

    def deploy_api(self, ret):
        '''
        this method create the top level rest api in AWS apigateway

        If a rest api id is already known, the existing api is cleaned up (resources and
        models removed) instead of creating a new one.

        ret
            a dictionary for returning status to Saltstack
        '''
        if self.restApiId:
            res = self._cleanup_api()
            if not res.get('deleted'):
                # NOTE(review): 'restAreId' in this user-facing message looks like a typo for
                # 'restApiId' — left unchanged here since it is runtime output.
                ret['comment'] = 'Failed to cleanup restAreId {0}'.format(self.restApiId)
                ret['abort'] = True
                ret['result'] = False
                return ret
            return ret

        response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name,
                                                          description=_Swagger.AWS_API_DESCRIPTION,
                                                          **self._common_aws_args)

        if not response.get('created'):
            ret['result'] = False
            ret['abort'] = True
            if 'error' in response:
                ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
            return ret

        # remember the id of the freshly created rest api for subsequent deploy steps
        self.restApiId = response.get('restapi', {}).get('id')

        return _log_changes(ret, 'deploy_api', response.get('restapi'))

    def delete_api(self, ret):
        '''
        Method to delete a Rest Api named defined in the swagger file's Info Object's title value.

        ret
            a dictionary for returning status to Saltstack
        '''
        exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name,
                                                                 description=_Swagger.AWS_API_DESCRIPTION,
                                                                 **self._common_aws_args)
        if exists_response.get('exists'):
            if __opts__['test']:
                # test mode: report the pending change without touching AWS
                ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
                ret['result'] = None
                ret['abort'] = True
                return ret

            delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name,
                                                                         description=_Swagger.AWS_API_DESCRIPTION,
                                                                         **self._common_aws_args)
            if not delete_api_response.get('deleted'):
                ret['result'] = False
                ret['abort'] = True
                if 'error' in delete_api_response:
                    ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
                return ret

            ret = _log_changes(ret, 'delete_api', delete_api_response)
        else:
            ret['comment'] = ('api already absent for swagger file: '
                              '{0}, desc: {1}'.format(self.rest_api_name, self.info_json))

        return ret

    def _aws_model_ref_from_swagger_ref(self, r):
        '''
        Helper function to reference models created on aws apigw

        Translates a swagger '$ref' value (e.g. '#/definitions/Foo') into the
        AWS model URL for this rest api.
        '''
        model_name = r.split('/')[-1]
        return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId,
                                                                                 model_name)

    def _update_schema_to_aws_notation(self, schema):
        '''
        Helper function to map model schema to aws notation

        Recursively rewrites every '$ref' value in the schema dict to the AWS model URL
        form and returns a new dict; the input schema is not modified.
        '''
        result = {}
        for k, v in schema.items():
            if k == '$ref':
                v = self._aws_model_ref_from_swagger_ref(v)
            if isinstance(v, dict):
                v = self._update_schema_to_aws_notation(v)
            result[k] = v
        return result

    def _build_dependent_model_list(self, obj_schema):
        '''
        Helper function to build the list of models the given object schema is referencing.

        Walks array 'items', '$ref' targets (recursively, via the swagger definitions in
        self._models()) and object 'properties'; returns a de-duplicated list of model names.
        '''
        dep_models_list = []

        if obj_schema:
            # default the type to 'object' when the schema omits it
            obj_schema['type'] = obj_schema.get('type', 'object')
            if obj_schema['type'] == 'array':
                dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
            else:
                ref = obj_schema.get('$ref')
                if ref:
                    # a referenced model depends on everything its own schema references
                    ref_obj_model = ref.split("/")[-1]
                    ref_obj_schema = self._models().get(ref_obj_model)
                    dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
                    dep_models_list.extend([ref_obj_model])
                else:
                    # need to walk each property object
                    properties = obj_schema.get('properties')
                    if properties:
                        for _, prop_obj_schema in six.iteritems(properties):
                            dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
        return list(set(dep_models_list))

    def _build_all_dependencies(self):
        '''
        Helper function to build a map of model to their list of model reference dependencies
        '''
        ret = {}
        for model, schema in six.iteritems(self._models()):
            dep_list = self._build_dependent_model_list(schema)
            ret[model] = dep_list
        return ret

    def _get_model_without_dependencies(self, models_dict):
        '''
        Helper function to find the next model that should be created

        Pops a model with an empty dependency list out of models_dict (mutating it) and
        removes that model from every remaining dependency list.  Raises ValueError if no
        dependency-free model exists, which indicates a missing/circular definition.
        '''
        next_model = None
        if not models_dict:
            return next_model

        for model, dependencies in six.iteritems(models_dict):
            if dependencies == []:
                next_model = model
                break

        if next_model is None:
            raise ValueError('incomplete model definitions, models in dependency '
                             'list not defined: {0}'.format(models_dict))

        # remove the model from other dependencies before returning
        models_dict.pop(next_model)
        for model, dep_list in six.iteritems(models_dict):
            if next_model in dep_list:
                dep_list.remove(next_model)

        return next_model

    def deploy_models(self, ret):
        '''
        Method to deploy swagger file's definition objects and associated schema to AWS
        Apigateway as Models

        ret
            a dictionary for returning status to Saltstack
        '''
        for model, schema in self.models():
            # add in a few attributes into the model schema that AWS expects
            _schema = self._update_schema_to_aws_notation(schema)
            _schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4,
                            'title': '{0} Schema'.format(model)})

            # check to see if model already exists, aws has 2 default models [Empty, Error]
            # which may need update with data from swagger file
            model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId,
                                                                                 modelName=model,
                                                                                 **self._common_aws_args)

            if model_exists_response.get('exists'):
                update_model_schema_response = (
                    __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId,
                                                                        modelName=model,
                                                                        schema=_dict_to_json_pretty(_schema),
                                                                        **self._common_aws_args))
                if not update_model_schema_response.get('updated'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in update_model_schema_response:
                        ret['comment'] = ('Failed to update existing model {0} with schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              update_model_schema_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
            else:
                create_model_response = (
                    __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId,
                                                                 modelName=model,
                                                                 modelDescription=model,
                                                                 schema=_dict_to_json_pretty(_schema),
                                                                 contentType='application/json',
                                                                 **self._common_aws_args))

                if not create_model_response.get('created'):
                    ret['result'] = False
                    ret['abort'] = True
                    if 'error' in create_model_response:
                        ret['comment'] = ('Failed to create model {0}, schema {1}, '
                                          'error: {2}'.format(model, _dict_to_json_pretty(schema),
                                                              create_model_response['error']['message']))
                    return ret

                ret = _log_changes(ret, 'deploy_models', create_model_response)

        return ret

    def _lambda_name(self, resourcePath, httpMethod):
        '''
        Helper method to construct lambda name based on the rule specified in doc string of
        boto_apigateway.api_present function

        Expands the configured funcname format, strips curly braces from path parameters,
        replaces whitespace and '/' with underscores, lowercases, and collapses repeated
        underscores.
        '''
        lambda_name = self._lambda_funcname_format.format(stage=self._stage_name,
                                                          api=self.rest_api_name,
                                                          resource=resourcePath,
                                                          method=httpMethod)
        lambda_name = lambda_name.strip()
        lambda_name = re.sub(r'{|}', '', lambda_name)
        lambda_name = re.sub(r'\s+|/', '_', lambda_name).lower()
        return re.sub(r'_+', '_', lambda_name)

    def _lambda_uri(self, lambda_name, lambda_region):
        '''
        Helper Method to construct the lambda uri for use in method integration

        Raises ValueError when the lambda function cannot be found.
        '''
        profile = self._common_aws_args.get('profile')
        region = self._common_aws_args.get('region')

        lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
        apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)

        lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)

        if lambda_region != apigw_region:
            if not lambda_desc.get('function'):
                # try look up in the same region as the apigateway as well if previous lookup failed
                # NOTE(review): this retry passes the exact same arguments as the first call,
                # so it cannot actually query a different region as the comment intends — verify.
                lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name,
                                                                        **self._common_aws_args)

        if not lambda_desc.get('function'):
            raise ValueError('Could not find lambda function {0} in '
                             'regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))

        lambda_arn = lambda_desc.get('function').get('FunctionArn')
        # the invocation uri is always built against the api gateway's region
        lambda_uri = ('arn:aws:apigateway:{0}:lambda:path/2015-03-31'
                      '/functions/{1}/invocations'.format(apigw_region, lambda_arn))
        return lambda_uri

    def _parse_method_data(self, method_name, method_data):
        '''
        Helper function to construct the method request params, models, request_templates and
        integration_type values needed to configure method request integration/mappings.

        method_name
            lowercased http method name (e.g. 'get', 'options')

        method_data
            the swagger operation object for this method
        '''
        method_params = {}
        method_models = {}
        if 'parameters' in method_data:
            for param in method_data['parameters']:
                p = _Swagger.SwaggerParameter(param)
                if p.name:
                    method_params[p.name] = True
                if p.schema:
                    method_models['application/json'] = p.schema

        # 'options' methods are mocked (CORS preflight); everything else integrates with AWS lambda
        request_templates = _Swagger.REQUEST_OPTION_TEMPLATE if method_name == 'options' else _Swagger.REQUEST_TEMPLATE
        integration_type = "MOCK" if method_name == 'options' else "AWS"

        return {'params': method_params,
                'models': method_models,
                'request_templates': request_templates,
                'integration_type': integration_type}

    def _find_patterns(self, o):
        '''
        Helper function to recursively collect every 'pattern' value found in a
        (possibly nested) schema dict.
        '''
        result = []
        if isinstance(o, dict):
            for k, v in six.iteritems(o):
                if isinstance(v, dict):
                    result.extend(self._find_patterns(v))
                else:
                    if k == 'pattern':
                        result.append(v)
        return result

    def _get_pattern_for_schema(self, schema_name, httpStatus):
        '''
        returns the pattern specified in a response schema

        Falls back to '.+' for http error response codes and '.*' otherwise; when the
        model defines multiple patterns, only the first one found is used.
        '''
        defaultPattern = '.+' if self._is_http_error_rescode(httpStatus) else '.*'
        model = self._models().get(schema_name)
        patterns = self._find_patterns(model)
        return patterns[0] if patterns else defaultPattern

    def _get_response_template(self, method_name, http_status):
        '''
        Helper to pick the response templates for a method/status pair: the user-supplied
        template when one was configured, otherwise the class default for that branch.
        '''
        if method_name == 'options' or not self._is_http_error_rescode(http_status):
            response_templates = {'application/json': self._response_template} \
                if self._response_template else self.RESPONSE_OPTION_TEMPLATE
        else:
            response_templates = {'application/json': self._error_response_template} \
                if self._error_response_template else self.RESPONSE_TEMPLATE
        return response_templates

    def _parse_method_response(self, method_name, method_response, httpStatus):
        '''
        Helper function to construct the method response params, models, and integration_params
        values needed to configure method response integration/mappings.

        method_response
            a _Swagger.SwaggerMethodResponse wrapping the swagger response object
        '''
        method_response_models = {}
        method_response_pattern = '.*'
        if method_response.schema:
            method_response_models['application/json'] = method_response.schema
            method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)

        method_response_params = {}
        method_integration_response_params = {}
        for header in method_response.headers:
            response_header = 'method.response.header.{0}'.format(header)
            method_response_params[response_header] = False
            header_data = method_response.headers.get(header)
            # map each declared header to its default value (quoted for AWS), or '*'
            method_integration_response_params[response_header] = (
                "'{0}'".format(header_data.get('default')) if 'default' in header_data else "'*'")

        response_templates = self._get_response_template(method_name, httpStatus)

        return {'params': method_response_params,
                'models': method_response_models,
                'integration_params': method_integration_response_params,
                'pattern': method_response_pattern,
                'response_templates': response_templates}

    def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required,
                       lambda_integration_role, lambda_region, authorization_type):
        '''
        Method to create a method for the given resource path, along with its associated
        request and response integrations.

        ret
            a dictionary for returning status to Saltstack

        resource_path
            the full resource path where the named method_name will be associated with.

        method_name
            a string that is one of the following values: 'delete', 'get', 'head', 'options',
            'patch', 'post', 'put'

        method_data
            the value dictionary for this method in the swagger definition file.

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'

        Raises ValueError when the swagger operation declares no responses.
        '''
        method = self._parse_method_data(method_name.lower(), method_data)

        # for options method to enable CORS, api_key_required will be set to False always.
        # authorization_type will be set to 'NONE' always.
        if method_name.lower() == 'options':
            api_key_required = False
            authorization_type = 'NONE'

        m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId,
                                                          resourcePath=resource_path,
                                                          httpMethod=method_name.upper(),
                                                          authorizationType=authorization_type,
                                                          apiKeyRequired=api_key_required,
                                                          requestParameters=method.get('params'),
                                                          requestModels=method.get('models'),
                                                          **self._common_aws_args)
        if not m.get('created'):
            ret = _log_error_and_abort(ret, m)
            return ret

        ret = _log_changes(ret, '_deploy_method.create_api_method', m)

        # 'options' methods are MOCK integrations and need no lambda uri
        lambda_uri = ""
        if method_name.lower() != 'options':
            lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name),
                                          lambda_region=lambda_region)

        # NOTE: integration method is set to POST always, as otherwise AWS makes wrong assumptions
        # about the intent of the call.  HTTP method will be passed to lambda as part of the
        # API gateway context
        integration = (
            __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId,
                                                               resourcePath=resource_path,
                                                               httpMethod=method_name.upper(),
                                                               integrationType=method.get('integration_type'),
                                                               integrationHttpMethod='POST',
                                                               uri=lambda_uri,
                                                               credentials=lambda_integration_role,
                                                               requestTemplates=method.get('request_templates'),
                                                               **self._common_aws_args))
        if not integration.get('created'):
            ret = _log_error_and_abort(ret, integration)
            return ret
        ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)

        if 'responses' in method_data:
            for response, response_data in six.iteritems(method_data['responses']):
                httpStatus = str(response)  # future lint: disable=blacklisted-function
                method_response = self._parse_method_response(method_name.lower(),
                                                              _Swagger.SwaggerMethodResponse(response_data),
                                                              httpStatus)

                mr = __salt__['boto_apigateway.create_api_method_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    responseParameters=method_response.get('params'),
                    responseModels=method_response.get('models'),
                    **self._common_aws_args)
                if not mr.get('created'):
                    ret = _log_error_and_abort(ret, mr)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)

                mir = __salt__['boto_apigateway.create_api_integration_response'](
                    restApiId=self.restApiId,
                    resourcePath=resource_path,
                    httpMethod=method_name.upper(),
                    statusCode=httpStatus,
                    selectionPattern=method_response.get('pattern'),
                    responseParameters=method_response.get('integration_params'),
                    responseTemplates=method_response.get('response_templates'),
                    **self._common_aws_args)
                if not mir.get('created'):
                    ret = _log_error_and_abort(ret, mir)
                    return ret
                ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
        else:
            raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))

        return ret

    def deploy_resources(self, ret, api_key_required, lambda_integration_role,
                         lambda_region, authorization_type):
        '''
        Method to deploy resources defined in the swagger file.

        ret
            a dictionary for returning status to Saltstack

        api_key_required
            True or False, whether api key is required to access this method.

        lambda_integration_role
            name of the IAM role or IAM role arn that Api Gateway will assume when executing
            the associated lambda function

        lambda_region
            the region for the lambda function that Api Gateway will integrate to.

        authorization_type
            'NONE' or 'AWS_IAM'
        '''
        for path, pathData in self.paths:
            resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId,
                                                                        path=path,
                                                                        **self._common_aws_args)
            if not resource.get('created'):
                ret = _log_error_and_abort(ret, resource)
                return ret
            ret = _log_changes(ret, 'deploy_resources', resource)
            for method, method_data in six.iteritems(pathData):
                # only deploy recognized swagger operations (get/post/put/...)
                if method in _Swagger.SWAGGER_OPERATION_NAMES:
                    ret = self._deploy_method(ret, path, method, method_data, api_key_required,
                                              lambda_integration_role, lambda_region,
                                              authorization_type)
        return ret