repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/ext/ipaddress.py
IPv6Address.is_reserved
python
def is_reserved(self):
    """Test if the address is otherwise IETF reserved.

    Returns:
        A boolean, True if the address is within one of the
        reserved IPv6 Network ranges.

    """
    # Prefixes reserved by the IETF; membership in any one of them
    # makes the address "reserved".
    reserved_prefixes = (
        '::/8', '100::/8', '200::/7', '400::/6', '800::/5',
        '1000::/4', '4000::/3', '6000::/3', '8000::/3', 'A000::/3',
        'C000::/3', 'E000::/4', 'F000::/5', 'F800::/6', 'FE00::/9',
    )
    for prefix in reserved_prefixes:
        if self in IPv6Network(prefix):
            return True
    return False
Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L1948-L1965
null
class IPv6Address(_BaseV6, _BaseAddress): """Represent and manipulate single IPv6 Addresses.""" def __init__(self, address): """Instantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. """ _BaseAddress.__init__(self, address) _BaseV6.__init__(self, address) # Efficient constructor from integer. if isinstance(address, int): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 16) self._ip = _int_from_bytes(address, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = str(address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v6_int_to_packed(self._ip) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ multicast_network = IPv6Network('ff00::/8') return self in multicast_network @property @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ linklocal_network = IPv6Network('fe80::/10') return self in linklocal_network @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. 
""" sitelocal_network = IPv6Network('fec0::/10') return self in sitelocal_network @property def is_private(self): """Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry. """ return (self in IPv6Network('::1/128') or self in IPv6Network('::/128') or self in IPv6Network('::ffff:0:0/96') or self in IPv6Network('100::/64') or self in IPv6Network('2001::/23') or self in IPv6Network('2001:2::/48') or self in IPv6Network('2001:db8::/32') or self in IPv6Network('2001:10::/28') or self in IPv6Network('fc00::/7') or self in IPv6Network('fe80::/10')) @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. """ return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return self._ip == 0 @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return self._ip == 1 @property def ipv4_mapped(self): """Return the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. """ if (self._ip >> 32) != 0xFFFF: return None return IPv4Address(self._ip & 0xFFFFFFFF) @property def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF)) @property def sixtofour(self): """Return the IPv4 6to4 embedded address. 
Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. """ if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
saltstack/salt
salt/ext/ipaddress.py
IPv6Address.is_private
python
def is_private(self):
    """Test if this address is allocated for private networks.

    Returns:
        A boolean, True if the address is reserved per
        iana-ipv6-special-registry.

    """
    # Networks listed in the iana-ipv6-special-registry as private.
    private_networks = [
        IPv6Network('::1/128'),
        IPv6Network('::/128'),
        IPv6Network('::ffff:0:0/96'),
        IPv6Network('100::/64'),
        IPv6Network('2001::/23'),
        IPv6Network('2001:2::/48'),
        IPv6Network('2001:db8::/32'),
        IPv6Network('2001:10::/28'),
        IPv6Network('fc00::/7'),
        IPv6Network('fe80::/10'),
    ]
    return any(self in net for net in private_networks)
Test if this address is allocated for private networks. Returns: A boolean, True if the address is reserved per iana-ipv6-special-registry.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L1994-L2011
null
class IPv6Address(_BaseV6, _BaseAddress): """Represent and manipulate single IPv6 Addresses.""" def __init__(self, address): """Instantiate a new IPv6 address object. Args: address: A string or integer representing the IP Additionally, an integer can be passed, so IPv6Address('2001:db8::') == IPv6Address(42540766411282592856903984951653826560) or, more generally IPv6Address(int(IPv6Address('2001:db8::'))) == IPv6Address('2001:db8::') Raises: AddressValueError: If address isn't a valid IPv6 address. """ _BaseAddress.__init__(self, address) _BaseV6.__init__(self, address) # Efficient constructor from integer. if isinstance(address, int): self._check_int_address(address) self._ip = address return # Constructing from a packed address if isinstance(address, bytes): self._check_packed_address(address, 16) self._ip = _int_from_bytes(address, 'big') return # Assume input argument to be string or any object representation # which converts into a formatted IP string. addr_str = str(address) self._ip = self._ip_int_from_string(addr_str) @property def packed(self): """The binary representation of this address.""" return v6_int_to_packed(self._ip) @property def is_multicast(self): """Test if the address is reserved for multicast use. Returns: A boolean, True if the address is a multicast address. See RFC 2373 2.7 for details. """ multicast_network = IPv6Network('ff00::/8') return self in multicast_network @property def is_reserved(self): """Test if the address is otherwise IETF reserved. Returns: A boolean, True if the address is within one of the reserved IPv6 Network ranges. 
""" reserved_networks = [IPv6Network('::/8'), IPv6Network('100::/8'), IPv6Network('200::/7'), IPv6Network('400::/6'), IPv6Network('800::/5'), IPv6Network('1000::/4'), IPv6Network('4000::/3'), IPv6Network('6000::/3'), IPv6Network('8000::/3'), IPv6Network('A000::/3'), IPv6Network('C000::/3'), IPv6Network('E000::/4'), IPv6Network('F000::/5'), IPv6Network('F800::/6'), IPv6Network('FE00::/9')] return any(self in x for x in reserved_networks) @property def is_link_local(self): """Test if the address is reserved for link-local. Returns: A boolean, True if the address is reserved per RFC 4291. """ linklocal_network = IPv6Network('fe80::/10') return self in linklocal_network @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ sitelocal_network = IPv6Network('fec0::/10') return self in sitelocal_network @property @property def is_global(self): """Test if this address is allocated for public networks. Returns: A boolean, true if the address is not reserved per iana-ipv6-special-registry. """ return not self.is_private @property def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 2373 2.5.2. """ return self._ip == 0 @property def is_loopback(self): """Test if the address is a loopback address. Returns: A boolean, True if the address is a loopback address as defined in RFC 2373 2.5.3. """ return self._ip == 1 @property def ipv4_mapped(self): """Return the IPv4 mapped address. Returns: If the IPv6 address is a v4 mapped address, return the IPv4 mapped address. Return None otherwise. 
""" if (self._ip >> 32) != 0xFFFF: return None return IPv4Address(self._ip & 0xFFFFFFFF) @property def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF)) @property def sixtofour(self): """Return the IPv4 6to4 embedded address. Returns: The IPv4 6to4-embedded address if present or None if the address doesn't appear to contain a 6to4 embedded address. """ if (self._ip >> 112) != 0x2002: return None return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
saltstack/salt
salt/ext/ipaddress.py
IPv6Network.hosts
python
def hosts(self):
    """Generate Iterator over usable hosts in a network.

    This is like __iter__ except it doesn't return the
    Subnet-Router anycast address (the network address itself).

    """
    # Yield every address after the network address up to and
    # including the highest address in the network.
    current = int(self.network_address) + 1
    last = int(self.broadcast_address)
    while current <= last:
        yield self._address_class(current)
        current += 1
Generate Iterator over usable hosts in a network. This is like __iter__ except it doesn't return the Subnet-Router anycast address.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/ipaddress.py#L2250-L2260
[ "def long_range(start, end):\n while start < end:\n yield start\n start += 1\n" ]
class IPv6Network(_BaseV6, _BaseNetwork): """This class represents and manipulates 128-bit IPv6 networks. Attributes: [examples for IPv6('2001:db8::1000/124')] .network_address: IPv6Address('2001:db8::1000') .hostmask: IPv6Address('::f') .broadcast_address: IPv6Address('2001:db8::100f') .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0') .prefixlen: 124 """ # Class to use when creating address objects _address_class = IPv6Address def __init__(self, address, strict=True): """Instantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied. """ _BaseV6.__init__(self, address) _BaseNetwork.__init__(self, address) # Efficient constructor from integer. 
if isinstance(address, int): self.network_address = IPv6Address(address) self._prefixlen = self._max_prefixlen self.netmask = IPv6Address(self._ALL_ONES) return # Constructing from a packed address if isinstance(address, bytes): self.network_address = IPv6Address(address) self._prefixlen = self._max_prefixlen self.netmask = IPv6Address(self._ALL_ONES) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: # This may raise NetmaskValueError self._prefixlen = self._prefix_from_prefix_string(addr[1]) else: self._prefixlen = self._max_prefixlen self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen)) if strict: if (IPv6Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv6Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__ @property def is_site_local(self): """Test if the address is reserved for site-local. Note that the site-local address space has been deprecated by RFC 3879. Use is_private to test if this address is in the space of unique local addresses as defined by RFC 4193. Returns: A boolean, True if the address is reserved per RFC 3513 2.5.6. """ return (self.network_address.is_site_local and self.broadcast_address.is_site_local)
saltstack/salt
salt/beacons/network_info.py
_to_list
python
def _to_list(obj):
    '''
    Convert an snetinfo object into a plain dict of its stat values.

    Only the attributes named in the module-level ``__attrs`` list are
    copied; attributes missing from ``obj`` are silently skipped.

    NOTE: despite the historical docstring ("to list"), this returns a
    dict mapping attribute name to value, which is what ``beacon()``
    embeds under the ``network_info`` key.
    '''
    ret = {}

    for attr in __attrs:
        if hasattr(obj, attr):
            ret[attr] = getattr(obj, attr)
    return ret
Convert snetinfo object to a dict of its stat attribute values
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/network_info.py#L33-L42
null
# -*- coding: utf-8 -*- ''' Beacon to monitor statistics from ethernet adapters .. versionadded:: 2015.5.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals import logging # Import third party libs # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False from salt.ext.six.moves import map # pylint: enable=import-error log = logging.getLogger(__name__) __virtualname__ = 'network_info' __attrs = ['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'errin', 'errout', 'dropin', 'dropout'] def __virtual__(): if not HAS_PSUTIL: return (False, 'cannot load network_info beacon: psutil not available') return __virtualname__ def validate(config): ''' Validate the beacon configuration ''' VALID_ITEMS = [ 'type', 'bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'errin', 'errout', 'dropin', 'dropout' ] # Configuration for load beacon should be a list of dicts if not isinstance(config, list): return False, ('Configuration for network_info beacon must be a list.') else: _config = {} list(map(_config.update, config)) for item in _config.get('interfaces', {}): if not isinstance(_config['interfaces'][item], dict): return False, ('Configuration for network_info beacon must ' 'be a list of dictionaries.') else: if not any(j in VALID_ITEMS for j in _config['interfaces'][item]): return False, ('Invalid configuration item in ' 'Beacon configuration.') return True, 'Valid beacon configuration' def beacon(config): ''' Emit the network statistics of this host. Specify thresholds for each network stat and only emit a beacon if any of them are exceeded. Emit beacon when any values are equal to configured values. .. 
code-block:: yaml beacons: network_info: - interfaces: eth0: type: equal bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100 Emit beacon when any values are greater than configured values. .. code-block:: yaml beacons: network_info: - interfaces: eth0: type: greater bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100 ''' ret = [] _config = {} list(map(_config.update, config)) log.debug('psutil.net_io_counters %s', psutil.net_io_counters) _stats = psutil.net_io_counters(pernic=True) log.debug('_stats %s', _stats) for interface in _config.get('interfaces', {}): if interface in _stats: interface_config = _config['interfaces'][interface] _if_stats = _stats[interface] _diff = False for attr in __attrs: if attr in interface_config: if 'type' in interface_config and \ interface_config['type'] == 'equal': if getattr(_if_stats, attr, None) == \ int(interface_config[attr]): _diff = True elif 'type' in interface_config and \ interface_config['type'] == 'greater': if getattr(_if_stats, attr, None) > \ int(interface_config[attr]): _diff = True else: log.debug('attr %s', getattr(_if_stats, attr, None)) else: if getattr(_if_stats, attr, None) == \ int(interface_config[attr]): _diff = True if _diff: ret.append({'interface': interface, 'network_info': _to_list(_if_stats)}) return ret
saltstack/salt
salt/beacons/network_info.py
validate
python
def validate(config):
    '''
    Validate the beacon configuration
    '''
    VALID_ITEMS = [
        'type', 'bytes_sent', 'bytes_recv', 'packets_sent',
        'packets_recv', 'errin', 'errout', 'dropin', 'dropout'
    ]

    # The beacon configuration must arrive as a list of dicts.
    if not isinstance(config, list):
        return False, ('Configuration for network_info beacon must be a list.')

    # Flatten the list of dicts into a single mapping.
    _config = {}
    for entry in config:
        _config.update(entry)

    for item in _config.get('interfaces', {}):
        per_iface = _config['interfaces'][item]
        if not isinstance(per_iface, dict):
            return False, ('Configuration for network_info beacon must '
                           'be a list of dictionaries.')
        # At least one recognized stat/threshold key must be present.
        if not any(j in VALID_ITEMS for j in per_iface):
            return False, ('Invalid configuration item in '
                           'Beacon configuration.')

    return True, 'Valid beacon configuration'
Validate the beacon configuration
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/network_info.py#L51-L78
null
# -*- coding: utf-8 -*- ''' Beacon to monitor statistics from ethernet adapters .. versionadded:: 2015.5.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals import logging # Import third party libs # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False from salt.ext.six.moves import map # pylint: enable=import-error log = logging.getLogger(__name__) __virtualname__ = 'network_info' __attrs = ['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'errin', 'errout', 'dropin', 'dropout'] def _to_list(obj): ''' Convert snetinfo object to list ''' ret = {} for attr in __attrs: if hasattr(obj, attr): ret[attr] = getattr(obj, attr) return ret def __virtual__(): if not HAS_PSUTIL: return (False, 'cannot load network_info beacon: psutil not available') return __virtualname__ def beacon(config): ''' Emit the network statistics of this host. Specify thresholds for each network stat and only emit a beacon if any of them are exceeded. Emit beacon when any values are equal to configured values. .. code-block:: yaml beacons: network_info: - interfaces: eth0: type: equal bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100 Emit beacon when any values are greater than configured values. .. 
code-block:: yaml beacons: network_info: - interfaces: eth0: type: greater bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100 ''' ret = [] _config = {} list(map(_config.update, config)) log.debug('psutil.net_io_counters %s', psutil.net_io_counters) _stats = psutil.net_io_counters(pernic=True) log.debug('_stats %s', _stats) for interface in _config.get('interfaces', {}): if interface in _stats: interface_config = _config['interfaces'][interface] _if_stats = _stats[interface] _diff = False for attr in __attrs: if attr in interface_config: if 'type' in interface_config and \ interface_config['type'] == 'equal': if getattr(_if_stats, attr, None) == \ int(interface_config[attr]): _diff = True elif 'type' in interface_config and \ interface_config['type'] == 'greater': if getattr(_if_stats, attr, None) > \ int(interface_config[attr]): _diff = True else: log.debug('attr %s', getattr(_if_stats, attr, None)) else: if getattr(_if_stats, attr, None) == \ int(interface_config[attr]): _diff = True if _diff: ret.append({'interface': interface, 'network_info': _to_list(_if_stats)}) return ret
saltstack/salt
salt/beacons/network_info.py
beacon
python
def beacon(config):
    '''
    Emit the network statistics of this host.

    Specify thresholds for each network stat and only emit a beacon
    if any of them are exceeded.

    Emit beacon when any values are equal to configured values.

    .. code-block:: yaml

        beacons:
          network_info:
            - interfaces:
                eth0:
                  type: equal
                  bytes_sent: 100000
                  bytes_recv: 100000
                  packets_sent: 100000
                  packets_recv: 100000
                  errin: 100
                  errout: 100
                  dropin: 100
                  dropout: 100

    Emit beacon when any values are greater than configured values.

    .. code-block:: yaml

        beacons:
          network_info:
            - interfaces:
                eth0:
                  type: greater
                  bytes_sent: 100000
                  bytes_recv: 100000
                  packets_sent: 100000
                  packets_recv: 100000
                  errin: 100
                  errout: 100
                  dropin: 100
                  dropout: 100

    '''
    ret = []

    # Flatten the list-of-dicts beacon config into one mapping.
    _config = {}
    list(map(_config.update, config))

    log.debug('psutil.net_io_counters %s', psutil.net_io_counters)

    # Per-NIC I/O counters keyed by interface name.
    _stats = psutil.net_io_counters(pernic=True)

    log.debug('_stats %s', _stats)

    for interface in _config.get('interfaces', {}):
        if interface in _stats:
            interface_config = _config['interfaces'][interface]
            _if_stats = _stats[interface]
            _diff = False
            for attr in __attrs:
                if attr in interface_config:
                    if 'type' in interface_config and \
                            interface_config['type'] == 'equal':
                        # Fire when the stat exactly equals the threshold.
                        if getattr(_if_stats, attr, None) == \
                                int(interface_config[attr]):
                            _diff = True
                    elif 'type' in interface_config and \
                            interface_config['type'] == 'greater':
                        # Fire when the stat exceeds the threshold.
                        if getattr(_if_stats, attr, None) > \
                                int(interface_config[attr]):
                            _diff = True
                        else:
                            log.debug('attr %s', getattr(_if_stats, attr, None))
                    else:
                        # No (or unrecognized) 'type': default to equality.
                        if getattr(_if_stats, attr, None) == \
                                int(interface_config[attr]):
                            _diff = True
            if _diff:
                # Report the full stat snapshot for the matching interface.
                ret.append({'interface': interface,
                            'network_info': _to_list(_if_stats)})
    return ret
Emit the network statistics of this host. Specify thresholds for each network stat and only emit a beacon if any of them are exceeded. Emit beacon when any values are equal to configured values. .. code-block:: yaml beacons: network_info: - interfaces: eth0: type: equal bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100 Emit beacon when any values are greater than configured values. .. code-block:: yaml beacons: network_info: - interfaces: eth0: type: greater bytes_sent: 100000 bytes_recv: 100000 packets_sent: 100000 packets_recv: 100000 errin: 100 errout: 100 dropin: 100 dropout: 100
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/network_info.py#L81-L166
[ "def _to_list(obj):\n '''\n Convert snetinfo object to list\n '''\n ret = {}\n\n for attr in __attrs:\n if hasattr(obj, attr):\n ret[attr] = getattr(obj, attr)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Beacon to monitor statistics from ethernet adapters .. versionadded:: 2015.5.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals import logging # Import third party libs # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: HAS_PSUTIL = False from salt.ext.six.moves import map # pylint: enable=import-error log = logging.getLogger(__name__) __virtualname__ = 'network_info' __attrs = ['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'errin', 'errout', 'dropin', 'dropout'] def _to_list(obj): ''' Convert snetinfo object to list ''' ret = {} for attr in __attrs: if hasattr(obj, attr): ret[attr] = getattr(obj, attr) return ret def __virtual__(): if not HAS_PSUTIL: return (False, 'cannot load network_info beacon: psutil not available') return __virtualname__ def validate(config): ''' Validate the beacon configuration ''' VALID_ITEMS = [ 'type', 'bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'errin', 'errout', 'dropin', 'dropout' ] # Configuration for load beacon should be a list of dicts if not isinstance(config, list): return False, ('Configuration for network_info beacon must be a list.') else: _config = {} list(map(_config.update, config)) for item in _config.get('interfaces', {}): if not isinstance(_config['interfaces'][item], dict): return False, ('Configuration for network_info beacon must ' 'be a list of dictionaries.') else: if not any(j in VALID_ITEMS for j in _config['interfaces'][item]): return False, ('Invalid configuration item in ' 'Beacon configuration.') return True, 'Valid beacon configuration'
saltstack/salt
salt/modules/apcups.py
status
python
def status():
    '''
    Return apcaccess output as a dict of status keys to values.

    CLI Example:

    .. code-block:: bash

        salt '*' apcups.status
    '''
    ret = {}
    apcaccess = _check_apcaccess()
    res = __salt__['cmd.run_all'](apcaccess)
    retcode = res['retcode']
    if retcode != 0:
        # Fixed garbled wording ("Something with wrong").
        ret['Error'] = 'Something went wrong executing apcaccess, is apcupsd running?'
        return ret

    for line in res['stdout'].splitlines():
        # Split on the FIRST colon only: apcaccess values such as
        # timestamps ('DATE : 2020-01-01 10:00:00 +0000') contain
        # colons themselves and would otherwise be truncated.
        key, sep, value = line.partition(':')
        if not sep:
            # No key/value separator on this line; skip instead of
            # raising IndexError as the old split-based code did.
            continue
        ret[key.strip()] = value.strip()

    return ret
Return apcaccess output CLI Example: .. code-block:: bash salt '*' apcups.status
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/apcups.py#L42-L64
null
# -*- coding: utf-8 -*- ''' Module for apcupsd ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging # Import Salt libs import salt.utils.path import salt.utils.decorators as decorators log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'apcups' @decorators.memoize def _check_apcaccess(): ''' Looks to see if apcaccess is present on the system ''' return salt.utils.path.which('apcaccess') def __virtual__(): ''' Provides apcupsd only if apcaccess is present ''' if _check_apcaccess(): return __virtualname__ return ( False, '{0} module can only be loaded on when apcupsd is installed'.format( __virtualname__ ) ) def status_load(): ''' Return load CLI Example: .. code-block:: bash salt '*' apcups.status_load ''' data = status() if 'LOADPCT' in data: load = data['LOADPCT'].split() if load[1].lower() == 'percent': return float(load[0]) return {'Error': 'Load not available.'} def status_charge(): ''' Return battery charge CLI Example: .. code-block:: bash salt '*' apcups.status_charge ''' data = status() if 'BCHARGE' in data: charge = data['BCHARGE'].split() if charge[1].lower() == 'percent': return float(charge[0]) return {'Error': 'Load not available.'} def status_battery(): ''' Return true if running on battery power CLI Example: .. code-block:: bash salt '*' apcups.status_battery ''' data = status() if 'TONBATT' in data: return not data['TONBATT'] == '0 Seconds' return {'Error': 'Battery status not available.'} # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
saltstack/salt
salt/modules/apcups.py
status_load
python
def status_load():
    '''
    Return load

    CLI Example:

    .. code-block:: bash

        salt '*' apcups.status_load
    '''
    data = status()

    # Guard clauses: bail out unless LOADPCT is present and reported
    # in percent, exactly as the original nested checks did.
    if 'LOADPCT' not in data:
        return {'Error': 'Load not available.'}

    fields = data['LOADPCT'].split()
    if fields[1].lower() != 'percent':
        return {'Error': 'Load not available.'}

    return float(fields[0])
Return load CLI Example: .. code-block:: bash salt '*' apcups.status_load
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/apcups.py#L67-L83
[ "def status():\n '''\n Return apcaccess output\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' apcups.status\n '''\n ret = {}\n apcaccess = _check_apcaccess()\n res = __salt__['cmd.run_all'](apcaccess)\n retcode = res['retcode']\n if retcode != 0:\n ret['Error'] = 'Something with wrong executing apcaccess, is apcupsd running?'\n return ret\n\n for line in res['stdout'].splitlines():\n line = line.split(':')\n ret[line[0].strip()] = line[1].strip()\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for apcupsd ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging # Import Salt libs import salt.utils.path import salt.utils.decorators as decorators log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'apcups' @decorators.memoize def _check_apcaccess(): ''' Looks to see if apcaccess is present on the system ''' return salt.utils.path.which('apcaccess') def __virtual__(): ''' Provides apcupsd only if apcaccess is present ''' if _check_apcaccess(): return __virtualname__ return ( False, '{0} module can only be loaded on when apcupsd is installed'.format( __virtualname__ ) ) def status(): ''' Return apcaccess output CLI Example: .. code-block:: bash salt '*' apcups.status ''' ret = {} apcaccess = _check_apcaccess() res = __salt__['cmd.run_all'](apcaccess) retcode = res['retcode'] if retcode != 0: ret['Error'] = 'Something with wrong executing apcaccess, is apcupsd running?' return ret for line in res['stdout'].splitlines(): line = line.split(':') ret[line[0].strip()] = line[1].strip() return ret def status_charge(): ''' Return battery charge CLI Example: .. code-block:: bash salt '*' apcups.status_charge ''' data = status() if 'BCHARGE' in data: charge = data['BCHARGE'].split() if charge[1].lower() == 'percent': return float(charge[0]) return {'Error': 'Load not available.'} def status_battery(): ''' Return true if running on battery power CLI Example: .. code-block:: bash salt '*' apcups.status_battery ''' data = status() if 'TONBATT' in data: return not data['TONBATT'] == '0 Seconds' return {'Error': 'Battery status not available.'} # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
saltstack/salt
salt/modules/apcups.py
status_charge
python
def status_charge():
    '''
    Return battery charge

    CLI Example:

    .. code-block:: bash

        salt '*' apcups.status_charge
    '''
    data = status()
    if 'BCHARGE' in data:
        charge = data['BCHARGE'].split()
        if charge[1].lower() == 'percent':
            return float(charge[0])
    # Fixed copy/paste error from status_load(): this function reports
    # battery charge, so the error message should say 'Charge'.
    return {'Error': 'Charge not available.'}
Return battery charge CLI Example: .. code-block:: bash salt '*' apcups.status_charge
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/apcups.py#L86-L102
[ "def status():\n '''\n Return apcaccess output\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' apcups.status\n '''\n ret = {}\n apcaccess = _check_apcaccess()\n res = __salt__['cmd.run_all'](apcaccess)\n retcode = res['retcode']\n if retcode != 0:\n ret['Error'] = 'Something with wrong executing apcaccess, is apcupsd running?'\n return ret\n\n for line in res['stdout'].splitlines():\n line = line.split(':')\n ret[line[0].strip()] = line[1].strip()\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for apcupsd ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging # Import Salt libs import salt.utils.path import salt.utils.decorators as decorators log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'apcups' @decorators.memoize def _check_apcaccess(): ''' Looks to see if apcaccess is present on the system ''' return salt.utils.path.which('apcaccess') def __virtual__(): ''' Provides apcupsd only if apcaccess is present ''' if _check_apcaccess(): return __virtualname__ return ( False, '{0} module can only be loaded on when apcupsd is installed'.format( __virtualname__ ) ) def status(): ''' Return apcaccess output CLI Example: .. code-block:: bash salt '*' apcups.status ''' ret = {} apcaccess = _check_apcaccess() res = __salt__['cmd.run_all'](apcaccess) retcode = res['retcode'] if retcode != 0: ret['Error'] = 'Something with wrong executing apcaccess, is apcupsd running?' return ret for line in res['stdout'].splitlines(): line = line.split(':') ret[line[0].strip()] = line[1].strip() return ret def status_load(): ''' Return load CLI Example: .. code-block:: bash salt '*' apcups.status_load ''' data = status() if 'LOADPCT' in data: load = data['LOADPCT'].split() if load[1].lower() == 'percent': return float(load[0]) return {'Error': 'Load not available.'} def status_battery(): ''' Return true if running on battery power CLI Example: .. code-block:: bash salt '*' apcups.status_battery ''' data = status() if 'TONBATT' in data: return not data['TONBATT'] == '0 Seconds' return {'Error': 'Battery status not available.'} # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
saltstack/salt
salt/states/esxi.py
coredump_configured
python
def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500):
    '''
    Ensures a host's core dump configuration.

    name
        Name of the state.

    enabled
        Sets whether or not ESXi core dump collection should be enabled. This is a
        boolean value set to ``True`` or ``False`` to enable or disable core dumps.

        Note that ESXi requires that the core dump must be enabled before
        any other parameters may be set. This also affects the ``changes`` results in
        the state return dictionary. If ``enabled`` is ``False``, we can't obtain any
        previous settings to compare other state variables, resulting in many ``old``
        references returning ``None``. Once ``enabled`` is ``True`` the ``changes``
        dictionary comparisons will be more accurate. This is due to the way the
        system coredump network configuration command returns data.

    dump_ip
        The IP address of host that will accept the dump.

    host_vnic
        Host VNic port through which to communicate. Defaults to ``vmk0``.

    dump_port
        TCP port to use for the dump. Defaults to ``6500``.

    Example:

    .. code-block:: yaml

        configure-host-coredump:
          esxi.coredump_configured:
            - enabled: True
            - dump_ip: 'my-coredump-ip.example.com'
    '''
    ret = {'name': name,
           'result': False,
           'changes': {},
           'comment': ''}
    esxi_cmd = 'esxi.cmd'
    enabled_msg = 'ESXi requires that the core dump must be enabled ' \
                  'before any other parameters may be set.'
    host = __pillar__['proxy']['host']

    # Fetch the current coredump network config; any 'Error' key aborts the
    # state immediately with result=False.
    current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host)
    error = current_config.get('Error')
    if error:
        ret['comment'] = 'Error: {0}'.format(error)
        return ret

    current_config = current_config.get('Coredump Config')
    current_enabled = current_config.get('enabled')

    # Configure coredump enabled state, if there are changes.
    if current_enabled != enabled:
        enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}}
        # Only run the command if not using test=True
        if not __opts__['test']:
            response = __salt__[esxi_cmd]('coredump_network_enable',
                                          enabled=enabled).get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret

            # Allow users to disable core dump, but then return since
            # nothing else can be set if core dump is disabled.
            if not enabled:
                ret['result'] = True
                ret['comment'] = enabled_msg
                ret['changes'].update(enabled_changes)
                return ret

        ret['changes'].update(enabled_changes)

    elif not enabled:
        # If current_enabled and enabled match, but are both False,
        # We must return before configuring anything. This isn't a
        # failure as core dump may be disabled intentionally.
        ret['result'] = True
        ret['comment'] = enabled_msg
        return ret

    # Test for changes with all remaining configurations. The changes flag is used
    # To detect changes, and then set_coredump_network_config is called one time.
    changes = False
    current_ip = current_config.get('ip')
    if current_ip != dump_ip:
        ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}})
        changes = True

    current_vnic = current_config.get('host_vnic')
    if current_vnic != host_vnic:
        ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}})
        changes = True

    # The command reports the port as a string, so compare string-to-string.
    current_port = current_config.get('port')
    if current_port != six.text_type(dump_port):
        ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}})
        changes = True

    # Only run the command if not using test=True and changes were detected.
    if not __opts__['test'] and changes is True:
        response = __salt__[esxi_cmd]('set_coredump_network_config',
                                      dump_ip=dump_ip,
                                      host_vnic=host_vnic,
                                      dump_port=dump_port).get(host)
        if response.get('success') is False:
            # Prefer stderr for the failure message, fall back to stdout.
            msg = response.get('stderr')
            if not msg:
                msg = response.get('stdout')
            ret['comment'] = 'Error: {0}'.format(msg)
            return ret

    ret['result'] = True
    if ret['changes'] == {}:
        ret['comment'] = 'Core Dump configuration is already in the desired state.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Core dump configuration will change.'

    return ret
Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredump network configuration command returns data. dump_ip The IP address of host that will accept the dump. host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L139-L274
null
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. 
code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. 
code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to its default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: ..
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
password_present
python
def password_present(name, password):
    '''
    Ensures the given password is set on the ESXi host.

    Passwords cannot be obtained from host, so if a password is set in this
    state, the ``vsphere.update_host_password`` function will always run
    (except when using test=True functionality) and the state's changes
    dictionary will always be populated.

    The username for which the password will change is the same username that
    is used to authenticate against the ESXi host via the Proxy Minion. For
    example, if the pillar definition for the proxy username is defined as
    ``root``, then the username that the password will be updated for via this
    state is ``root``.

    name
        Name of the state.

    password
        The new password to change on the host.

    Example:

    .. code-block:: yaml

        configure-host-password:
          esxi.password_present:
            - password: 'new-bad-password'
    '''
    # The current password can never be read back, so a change is always
    # assumed and 'old' is reported as unknown.
    ret = {'name': name,
           'result': True,
           'changes': {'old': 'unknown', 'new': '********'},
           'comment': 'Host password was updated.'}

    # Test mode: only report that the password would change.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Host password will change.'
        return ret

    try:
        __salt__['esxi.cmd']('update_host_password', new_password=password)
    except CommandExecutionError as err:
        ret['result'] = False
        ret['comment'] = 'Error: {0}'.format(err)

    return ret
Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L277-L323
null
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. 
If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop 
ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' 
return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' 
return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. 
if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. 
If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
ntp_configured
python
def ntp_configured(name,
                   service_running,
                   ntp_servers=None,
                   service_policy=None,
                   service_restart=False,
                   update_datetime=False):
    '''
    Ensures a host's NTP server configuration such as setting NTP servers, ensuring the
    NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host.

    name
        Name of the state.

    service_running
        Ensures the running state of the ntp daemon for the host. Boolean value where
        ``True`` indicates that ntpd should be running and ``False`` indicates that it
        should be stopped.

    ntp_servers
        A list of servers that should be added to the ESXi host's NTP configuration.

    service_policy
        The policy to set for the NTP service.

        .. note::

            When setting the service policy to ``off`` or ``on``, you *must* quote the
            setting. If you don't, the yaml parser will set the string to a boolean,
            which will cause trouble checking for stateful changes and will error when
            trying to set the policy on the ESXi host.

    service_restart
        If set to ``True``, the ntp daemon will be restarted, regardless of its previous
        running state. Default is ``False``.

    update_datetime
        If set to ``True``, the date/time on the given host will be updated to UTC.
        Default setting is ``False``. This option should be used with caution since
        network delays and execution delays can result in time skews.

    Example:

    .. code-block:: yaml

        configure-host-ntp:
          esxi.ntp_configured:
            - service_running: True
            - ntp_servers:
              - 192.174.1.100
              - 192.174.1.200
            - service_policy: 'on'
            - service_restart: True

    '''
    ret = {'name': name,
           'result': False,
           'changes': {},
           'comment': ''}
    esxi_cmd = 'esxi.cmd'
    host = __pillar__['proxy']['host']
    ntpd = 'ntpd'

    ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host)
    ntp_running = __salt__[esxi_cmd]('get_service_running',
                                     service_name=ntpd).get(host)
    error = ntp_running.get('Error')
    if error:
        ret['comment'] = 'Error: {0}'.format(error)
        return ret
    ntp_running = ntp_running.get(ntpd)

    # Configure NTP Servers for the Host
    if ntp_servers and set(ntp_servers) != set(ntp_config):
        # Only run the command if not using test=True
        if not __opts__['test']:
            response = __salt__[esxi_cmd]('set_ntp_config',
                                          ntp_servers=ntp_servers).get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        # Set changes dictionary for ntp_servers
        ret['changes'].update({'ntp_servers':
                              {'old': ntp_config,
                               'new': ntp_servers}})

    # Configure service_running state
    if service_running != ntp_running:
        # Only run the command if not using test=True
        if not __opts__['test']:
            # BUG FIX: branch on the DESIRED state (service_running), not the
            # CURRENT state (ntp_running). The previous code tested
            # ``ntp_running is True`` which, inside this (desired != current)
            # branch, started the daemon when it should be stopped and vice
            # versa. This matches the sibling states (ssh_configured,
            # vmotion_configured, vsan_configured) which branch on the
            # desired value.
            # Start ntpd if service_running=True
            if service_running is True:
                response = __salt__[esxi_cmd]('service_start',
                                              service_name=ntpd).get(host)
                error = response.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
            # Stop ntpd if service_running=False
            else:
                response = __salt__[esxi_cmd]('service_stop',
                                              service_name=ntpd).get(host)
                error = response.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
        ret['changes'].update({'service_running':
                              {'old': ntp_running,
                               'new': service_running}})

    # Configure service_policy
    if service_policy:
        current_service_policy = __salt__[esxi_cmd]('get_service_policy',
                                                    service_name=ntpd).get(host)
        error = current_service_policy.get('Error')
        if error:
            ret['comment'] = 'Error: {0}'.format(error)
            return ret
        current_service_policy = current_service_policy.get(ntpd)

        if service_policy != current_service_policy:
            # Only run the command if not using test=True
            if not __opts__['test']:
                response = __salt__[esxi_cmd]('set_service_policy',
                                              service_name=ntpd,
                                              service_policy=service_policy).get(host)
                error = response.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
            ret['changes'].update({'service_policy':
                                  {'old': current_service_policy,
                                   'new': service_policy}})

    # Update datetime, if requested.
    if update_datetime:
        # Only run the command if not using test=True
        if not __opts__['test']:
            response = __salt__[esxi_cmd]('update_host_datetime').get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        ret['changes'].update({'update_datetime':
                              {'old': '',
                               'new': 'Host datetime was updated.'}})

    # Restart ntp_service if service_restart=True
    if service_restart:
        # Only run the command if not using test=True
        if not __opts__['test']:
            response = __salt__[esxi_cmd]('service_restart',
                                          service_name=ntpd).get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        ret['changes'].update({'service_restart':
                              {'old': '',
                               'new': 'NTP Daemon Restarted.'}})

    ret['result'] = True
    if ret['changes'] == {}:
        ret['comment'] = 'NTP is already in the desired state.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'NTP state will change.'

    return ret
Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L326-L497
null
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. 
if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. 
if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. 
Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. 
if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
vmotion_configured
python
def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret
Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L500-L569
null
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. 
Default is ``False``. Example: .. code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should be stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the SSH service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: ..
code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to its default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: ..
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
vsan_configured
python
def vsan_configured(name, enabled, add_disks_to_vsan=False):
    '''
    Manage a host's VSAN setup: turn VSAN on or off and, optionally, attach
    every VSAN-eligible disk on the host to its VSAN system.

    name
        Name of the state.

    enabled
        Desired VSAN state as a boolean: ``True`` enables VSAN on the host,
        ``False`` disables it.

    add_disks_to_vsan
        When ``True``, all VSAN-eligible disks found on the host are added
        to the host's VSAN system. Defaults to ``False``.

    Example:

    .. code-block:: yaml

        configure-host-vsan:
          esxi.vsan_configured:
            - enabled: True
            - add_disks_to_vsan: True
    '''
    ret = {'name': name,
           'result': False,
           'changes': {},
           'comment': ''}
    host = __pillar__['proxy']['host']
    run = __salt__['esxi.cmd']
    dry_run = __opts__['test']

    status = run('get_vsan_enabled').get(host)
    error = status.get('Error')
    if error:
        ret['comment'] = 'Error: {0}'.format(error)
        return ret
    vsan_enabled_now = status.get('VSAN Enabled')

    # Toggle VSAN only if the desired state differs from the current one.
    if enabled != vsan_enabled_now:
        if not dry_run:
            # NOTE: 'is True' (identity) mirrors the upstream check — only a
            # literal True enables; any other truthy value disables.
            cmd = 'vsan_enable' if enabled is True else 'vsan_disable'
            response = run(cmd).get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        ret['changes']['enabled'] = {'old': vsan_enabled_now, 'new': enabled}

    # Attach any eligible disks to the VSAN system, if requested.
    if add_disks_to_vsan:
        eligible = run('get_vsan_eligible_disks').get(host)
        error = eligible.get('Error')
        if error:
            ret['comment'] = 'Error: {0}'.format(error)
            return ret
        disks = eligible.get('Eligible')
        if disks and isinstance(disks, list):
            if not dry_run:
                response = run('vsan_add_disks').get(host)
                error = response.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
            ret['changes']['add_disks_to_vsan'] = {'old': '', 'new': disks}

    ret['result'] = True
    if not ret['changes']:
        ret['comment'] = 'VSAN configuration is already in the desired state.'
        return ret

    if dry_run:
        # test=True: report pending changes without claiming success.
        ret['result'] = None
        ret['comment'] = 'VSAN configuration will change.'

    return ret
Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L572-L666
null
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. 
ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. 
if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
ssh_configured
python
def ssh_configured(name,
                   service_running,
                   ssh_key=None,
                   ssh_key_file=None,
                   service_policy=None,
                   service_restart=False,
                   certificate_verify=False):
    '''
    Manage the SSH configuration for a host including whether or not SSH is
    running or the presence of a given SSH key.

    Note: Only one ssh key can be uploaded for root. Uploading a second key
    will replace any existing key.

    name
        Name of the state.

    service_running
        Ensures whether or not the SSH service should be running on a host.
        Represented as a boolean value where ``True`` indicates that SSH
        should be running and ``False`` indicates that SSH should be stopped.

        In order to update SSH keys, the SSH service must be running.

    ssh_key
        Public SSH key to be added to the authorized_keys file on the ESXi
        host. You can use ``ssh_key`` or ``ssh_key_file``, but not both.

    ssh_key_file
        File containing the public SSH key to be added to the authorized_keys
        file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``,
        but not both.

    service_policy
        The policy to set for the SSH service.

        .. note::

            When setting the service policy to ``off`` or ``on``, you *must*
            quote the setting. If you don't, the yaml parser will set the
            string to a boolean, which will cause trouble checking for
            stateful changes and will error when trying to set the policy on
            the ESXi host.

    service_restart
        If set to ``True``, the SSH service will be restarted, regardless of
        its previous running state. Default is ``False``.

    certificate_verify
        If set to ``True``, the SSL connection must present a valid
        certificate. Default is ``False``.

    Example:

    .. code-block:: yaml

        configure-host-ssh:
          esxi.ssh_configured:
            - service_running: True
            - ssh_key_file: /etc/salt/ssh_keys/my_key.pub
            - service_policy: 'on'
            - service_restart: True
            - certificate_verify: True

    '''
    ret = {'name': name,
           'result': False,
           'changes': {},
           'comment': ''}
    esxi_cmd = 'esxi.cmd'
    host = __pillar__['proxy']['host']
    ssh = 'ssh'

    # NOTE(review): the docs say ``ssh_key`` and ``ssh_key_file`` are
    # mutually exclusive, but that is not validated here — confirm whether
    # the execution module rejects passing both.
    ssh_running = __salt__[esxi_cmd]('get_service_running',
                                     service_name=ssh).get(host)
    error = ssh_running.get('Error')
    if error:
        ret['comment'] = 'Error: {0}'.format(error)
        return ret
    ssh_running = ssh_running.get(ssh)

    # Configure SSH service_running state, if changed.
    if service_running != ssh_running:
        # Only actually run the command if not using test=True
        if not __opts__['test']:
            # Start SSH if service_running=True
            if service_running is True:
                enable = __salt__[esxi_cmd]('service_start',
                                            service_name=ssh).get(host)
                error = enable.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
            # Disable SSH if service_running=False
            else:
                disable = __salt__[esxi_cmd]('service_stop',
                                             service_name=ssh).get(host)
                error = disable.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret

        ret['changes'].update({'service_running':
                              {'old': ssh_running,
                               'new': service_running}})

    # If uploading an SSH key or SSH key file, see if there's a current
    # SSH key and compare the current key to the key set in the state.
    current_ssh_key, ssh_key_changed = None, False
    if ssh_key or ssh_key_file:
        current_ssh_key = __salt__[esxi_cmd]('get_ssh_key',
                                             certificate_verify=certificate_verify)
        error = current_ssh_key.get('Error')
        if error:
            ret['comment'] = 'Error: {0}'.format(error)
            return ret
        current_ssh_key = current_ssh_key.get('key')
        if current_ssh_key:
            clean_current_key = _strip_key(current_ssh_key).split(' ')
            if not ssh_key:
                ssh_key = ''
                # Open ssh key file and read in contents to create one key string
                with salt.utils.files.fopen(ssh_key_file, 'r') as key_file:
                    for line in key_file:
                        if line.startswith('#'):
                            # Commented line
                            continue
                        ssh_key = ssh_key + line
            clean_ssh_key = _strip_key(ssh_key).split(' ')
            # Compare the first two fields (key type and key material) of the
            # cleaned keys. Slice comparison avoids an IndexError if either
            # key string is malformed and has fewer than two fields; a
            # malformed key simply registers as a change.
            if clean_current_key[:2] != clean_ssh_key[:2]:
                ssh_key_changed = True
        else:
            # If current_ssh_key is None, but we're setting a new key with
            # either ssh_key or ssh_key_file, then we need to flag the change.
            ssh_key_changed = True

    # Upload SSH key, if changed.
    if ssh_key_changed:
        if not __opts__['test']:
            # Upload key
            response = __salt__[esxi_cmd]('upload_ssh_key',
                                          ssh_key=ssh_key,
                                          ssh_key_file=ssh_key_file,
                                          certificate_verify=certificate_verify)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        ret['changes'].update({'SSH Key':
                              {'old': current_ssh_key,
                               'new': ssh_key if ssh_key else ssh_key_file}})

    # Configure service_policy
    if service_policy:
        current_service_policy = __salt__[esxi_cmd]('get_service_policy',
                                                    service_name=ssh).get(host)
        error = current_service_policy.get('Error')
        if error:
            ret['comment'] = 'Error: {0}'.format(error)
            return ret
        current_service_policy = current_service_policy.get(ssh)

        if service_policy != current_service_policy:
            # Only run the command if not using test=True
            if not __opts__['test']:
                response = __salt__[esxi_cmd]('set_service_policy',
                                              service_name=ssh,
                                              service_policy=service_policy).get(host)
                error = response.get('Error')
                if error:
                    ret['comment'] = 'Error: {0}'.format(error)
                    return ret
            ret['changes'].update({'service_policy':
                                  {'old': current_service_policy,
                                   'new': service_policy}})

    # Restart ssh_service if service_restart=True
    if service_restart:
        # Only run the command if not using test=True
        if not __opts__['test']:
            response = __salt__[esxi_cmd]('service_restart',
                                          service_name=ssh).get(host)
            error = response.get('Error')
            if error:
                ret['comment'] = 'Error: {0}'.format(error)
                return ret
        ret['changes'].update({'service_restart':
                              {'old': '',
                               'new': 'SSH service restarted.'}})

    ret['result'] = True
    if ret['changes'] == {}:
        ret['comment'] = 'SSH service is already in the desired state.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'SSH service state will change.'

    return ret
Manage the SSH configuration for a host including whether or not SSH is
    running or the presence of a given SSH key.

    Note: Only one ssh key can be uploaded for root. Uploading a second key will
    replace any existing key.

    name
        Name of the state.

    service_running
        Ensures whether or not the SSH service should be running on a host.
        Represented as a boolean value where ``True`` indicates that SSH should be
        running and ``False`` indicates that SSH should be stopped.

        In order to update SSH keys, the SSH service must be running.

    ssh_key
        Public SSH key to be added to the authorized_keys file on the ESXi host. You can
        use ``ssh_key`` or ``ssh_key_file``, but not both.

    ssh_key_file
        File containing the public SSH key to be added to the authorized_keys file on
        the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both.

    service_policy
        The policy to set for the SSH service.

        .. note::

            When setting the service policy to ``off`` or ``on``, you *must* quote
            the setting. If you don't, the yaml parser will set the string to a
            boolean, which will cause trouble checking for stateful changes and
            will error when trying to set the policy on the ESXi host.

    service_restart
        If set to ``True``, the SSH service will be restarted, regardless of its
        previous running state. Default is ``False``.

    certificate_verify
        If set to ``True``, the SSL connection must present a valid certificate.
        Default is ``False``.

    Example:

    .. code-block:: yaml

        configure-host-ssh:
          esxi.ssh_configured:
            - service_running: True
            - ssh_key_file: /etc/salt/ssh_keys/my_key.pub
            - service_policy: 'on'
            - service_restart: True
            - certificate_verify: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L669-L866
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def _strip_key(key_string):\n '''\n Strips an SSH key string of white space and line endings and returns the new string.\n\n key_string\n The string to be stripped.\n '''\n key_string.strip()\n key_string.replace('\\n', '')\n key_string.replace('\\r\\n', '')\n return key_string\n" ]
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. 
code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. 
Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' 
''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
syslog_configured
python
def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret
Ensures the specified syslog configuration parameters. By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L869-L1025
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _lookup_syslog_config(config):\n '''\n Helper function that looks up syslog_config keys available from\n ``vsphere.get_syslog_config``.\n '''\n lookup = {'default-timeout': 'Default Network Retry Timeout',\n 'logdir': 'Local Log Output',\n 'default-size': 'Local Logging Default Rotation Size',\n 'logdir-unique': 'Log To Unique Subdirectory',\n 'default-rotate': 'Local Logging Default Rotations',\n 'loghost': 'Remote Host'}\n\n return lookup.get(config)\n" ]
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. 
code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. 
code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. 
Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ 
{dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' .format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. 
Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' 
''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' ''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. 
Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. Default value is False. 
''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' ''.format(datastore['backing_disk_scsi_addr'], 
hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' ''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' 
''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are 
going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' ''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. 
''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
diskgroups_configured
python
def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = 
%s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} (id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' .format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. 
Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' 
''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' ''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret
Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. Default value is False.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L1030-L1300
[ "def serialize(cls, id_=None):\n # Get the initial serialization\n serialized = super(DefinitionsSchema, cls).serialize(id_)\n complex_items = []\n # Augment the serializations with the definitions of all complex items\n aux_items = cls._items.values()\n\n # Convert dict_view object to a list on Python 3\n if six.PY3:\n aux_items = list(aux_items)\n\n while aux_items:\n item = aux_items.pop(0)\n # Add complex attributes\n if isinstance(item, ComplexSchemaItem):\n complex_items.append(item)\n aux_items.extend(item.get_complex_attrs())\n\n # Handle container items\n if isinstance(item, OneOfItem):\n aux_items.extend(item.items)\n elif isinstance(item, ArrayItem):\n aux_items.append(item.items)\n elif isinstance(item, DictItem):\n if item.properties:\n aux_items.extend(item.properties.values())\n if item.additional_properties and \\\n isinstance(item.additional_properties, SchemaItem):\n\n aux_items.append(item.additional_properties)\n\n definitions = OrderedDict()\n for config in complex_items:\n if isinstance(config, ComplexSchemaItem):\n definitions[config.definition_name] = \\\n config.get_definition()\n serialized['definitions'] = definitions\n return serialized\n" ]
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. 
code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. 
code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def host_cache_configured(name, enabled, datastore, swap_size='100%', dedicated_backing_disk=False, erase_backing_disk=False): ''' Configures the host cache used for swapping. It will do the following: 1. Checks if backing disk exists 2. Creates the VMFS datastore if doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. 
code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed sized swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. Default value is False. ''' log.trace('enabled = %s', enabled) log.trace('datastore = %s', datastore) log.trace('swap_size = %s', swap_size) log.trace('erase_backing_disk = %s', erase_backing_disk) # Variable used to return the result of the invocation proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) ret = {'name': hostname, 'comment': 'Default comments', 'result': None, 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] changes = {} si = None try: log.debug('Validating host_cache_configured input') schema = HostCacheSchema.serialize() try: jsonschema.validate({'enabled': enabled, 'datastore': datastore, 'swap_size': swap_size, 'erase_backing_disk': erase_backing_disk}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) m = re.match(r'(\d+)(%|GiB)', swap_size) 
swap_size_value = int(m.group(1)) swap_type = m.group(2) log.trace('swap_size_value = %s; swap_type = %s', swap_size_value, swap_type) si = __salt__['vsphere.get_service_instance_via_proxy']() host_cache = __salt__['vsphere.get_host_cache'](service_instance=si) # Check enabled if host_cache['enabled'] != enabled: changes.update({'enabled': {'old': host_cache['enabled'], 'new': enabled}}) needs_setting = True # Check datastores existing_datastores = None if host_cache.get('datastore'): existing_datastores = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si) # Retrieve backing disks existing_disks = __salt__['vsphere.list_disks']( scsi_addresses=[datastore['backing_disk_scsi_addr']], service_instance=si) if not existing_disks: raise VMwareObjectRetrievalError( 'Disk with scsi address \'{0}\' was not found in host \'{1}\'' ''.format(datastore['backing_disk_scsi_addr'], hostname)) backing_disk = existing_disks[0] backing_disk_display = '{0} (id:{1})'.format( backing_disk['scsi_address'], backing_disk['id']) log.trace('backing_disk = %s', backing_disk_display) existing_datastore = None if not existing_datastores: # Check if disk needs to be erased if erase_backing_disk: if __opts__['test']: comments.append('State {0} will erase ' 'the backing disk \'{1}\' on host \'{2}\'.' ''.format(name, backing_disk_display, hostname)) log.info(comments[-1]) else: # Erase disk __salt__['vsphere.erase_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) comments.append('Erased backing disk \'{0}\' on host ' '\'{1}\'.'.format(backing_disk_display, hostname)) log.info(comments[-1]) # Create the datastore if __opts__['test']: comments.append('State {0} will create ' 'the datastore \'{1}\', with backing disk ' '\'{2}\', on host \'{3}\'.' 
''.format(name, datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) else: if dedicated_backing_disk: # Check backing disk doesn't already have partitions partitions = __salt__['vsphere.list_disk_partitions']( disk_id=backing_disk['id'], service_instance=si) log.trace('partitions = %s', partitions) # We will ignore the mbr partitions non_mbr_partitions = [p for p in partitions if p['format'] != 'mbr'] if non_mbr_partitions: raise VMwareApiError( 'Backing disk \'{0}\' has unexpected partitions' ''.format(backing_disk_display)) __salt__['vsphere.create_vmfs_datastore']( datastore['name'], existing_disks[0]['id'], datastore['vmfs_version'], service_instance=si) comments.append('Created vmfs datastore \'{0}\', backed by ' 'disk \'{1}\', on host \'{2}\'.' ''.format(datastore['name'], backing_disk_display, hostname)) log.info(comments[-1]) changes.update( {'datastore': {'new': {'name': datastore['name'], 'backing_disk': backing_disk_display}}}) existing_datastore = \ __salt__['vsphere.list_datastores_via_proxy']( datastore_names=[datastore['name']], service_instance=si)[0] needs_setting = True else: # Check datastore is backed by the correct disk if not existing_datastores[0].get('backing_disk_ids'): raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a ' 'backing disk' ''.format(datastore['name'])) if backing_disk['id'] not in \ existing_datastores[0]['backing_disk_ids']: raise VMwareSaltError( 'Datastore \'{0}\' is not backed by the correct disk: ' 'expected \'{1}\'; got {2}' ''.format( datastore['name'], backing_disk['id'], ', '.join( ['\'{0}\''.format(disk) for disk in existing_datastores[0]['backing_disk_ids']]))) comments.append('Datastore \'{0}\' already exists on host \'{1}\' ' 'and is backed by disk \'{2}\'. 
Nothing to be ' 'done.'.format(datastore['name'], hostname, backing_disk_display)) existing_datastore = existing_datastores[0] log.trace('existing_datastore = %s', existing_datastore) log.info(comments[-1]) if existing_datastore: # The following comparisons can be done if the existing_datastore # is set; it may not be set if running in test mode # # We support percent, as well as MiB, we will convert the size # to MiB, multiples of 1024 (VMware SDK limitation) if swap_type == '%': # Percentage swap size # Convert from bytes to MiB raw_size_MiB = (swap_size_value/100.0) * \ (existing_datastore['capacity']/1024/1024) else: raw_size_MiB = swap_size_value * 1024 log.trace('raw_size = %sMiB', raw_size_MiB) swap_size_MiB = int(raw_size_MiB/1024)*1024 log.trace('adjusted swap_size = %sMiB', swap_size_MiB) existing_swap_size_MiB = 0 m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \ host_cache.get('swap_size') else None if m: # if swap_size from the host is set and has an expected value # we are going to parse it to get the number of MiBs existing_swap_size_MiB = int(m.group(1)) if not existing_swap_size_MiB == swap_size_MiB: needs_setting = True changes.update( {'swap_size': {'old': '{}GiB'.format(existing_swap_size_MiB/1024), 'new': '{}GiB'.format(swap_size_MiB/1024)}}) if needs_setting: if __opts__['test']: comments.append('State {0} will configure ' 'the host cache on host \'{1}\' to: {2}.' 
''.format(name, hostname, {'enabled': enabled, 'datastore_name': datastore['name'], 'swap_size': swap_size})) else: if (existing_datastore['capacity'] / 1024.0**2) < \ swap_size_MiB: raise ArgumentValueError( 'Capacity of host cache datastore \'{0}\' ({1} MiB) is ' 'smaller than the required swap size ({2} MiB)' ''.format(existing_datastore['name'], existing_datastore['capacity'] / 1024.0**2, swap_size_MiB)) __salt__['vsphere.configure_host_cache']( enabled, datastore['name'], swap_size_MiB=swap_size_MiB, service_instance=si) comments.append('Host cache configured on host ' '\'{0}\'.'.format(hostname)) else: comments.append('Host cache on host \'{0}\' is already correctly ' 'configured. Nothing to be done.'.format(hostname)) result = True __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), 'result': result, 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': '{}.'.format(err)}) return ret def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. ''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/esxi.py
host_cache_configured
python
def host_cache_configured(name, enabled, datastore, swap_size='100%',
                          dedicated_backing_disk=False,
                          erase_backing_disk=False):
    '''
    Configures the host cache used for swapping.

    It will do the following:

    1. Checks if the backing disk exists

    2. Creates the VMFS datastore if it doesn't exist (datastore partition
       will be created and use the entire disk)

    3. Raises an error if ``dedicated_backing_disk`` is ``True`` and
       partitions already exist on the backing disk

    4. Configures host_cache to use a portion of the datastore for caching
       (either a specific size or a percentage of the datastore)

    Examples

    Percentage swap size (can't be 100%)

    .. code:: python

        {
            'enabled': true,
            'datastore': {
                'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
                'vmfs_version': 5,
                'name': 'hostcache'
                }
            'dedicated_backing_disk': false
            'swap_size': '98%',
        }

    Fixed-size swap size

    .. code:: python

        {
            'enabled': true,
            'datastore': {
                'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0',
                'vmfs_version': 5,
                'name': 'hostcache'
                }
            'dedicated_backing_disk': true
            'swap_size': '10GiB',
        }

    name
        Mandatory state name.

    enabled
        Specifies whether the host cache is enabled.

    datastore
        Specifies the host cache datastore.

    swap_size
        Specifies the size of the host cache swap. Can be a percentage or a
        value in GiB. Default value is ``100%``.

    dedicated_backing_disk
        Specifies whether the backing disk is dedicated to the host cache
        which means it must have no other partitions. Default is False.

    erase_backing_disk
        Specifies whether to erase all partitions on the backing disk before
        the datastore is created. Default value is False.
    '''
    log.trace('enabled = %s', enabled)
    log.trace('datastore = %s', datastore)
    log.trace('swap_size = %s', swap_size)
    log.trace('erase_backing_disk = %s', erase_backing_disk)
    # Variable used to return the result of the invocation
    proxy_details = __salt__['esxi.get_details']()
    # When proxying through a vCenter the ESXi host name comes from
    # 'esxi_host'; otherwise 'host' is the ESXi host itself
    hostname = proxy_details['host'] if not proxy_details.get('vcenter') \
        else proxy_details['esxi_host']
    log.trace('hostname = %s', hostname)
    log.info('Running host_cache_swap_configured for host \'%s\'',
             hostname)
    ret = {'name': hostname,
           'comment': 'Default comments',
           'result': None,
           'changes': {}}
    # 'result' is None in test mode and True otherwise; it is forced to
    # True below when nothing needs to be done
    result = None if __opts__['test'] else True  # We assume success
    needs_setting = False
    comments = []
    changes = {}
    si = None
    try:
        log.debug('Validating host_cache_configured input')
        schema = HostCacheSchema.serialize()
        try:
            jsonschema.validate({'enabled': enabled,
                                 'datastore': datastore,
                                 'swap_size': swap_size,
                                 'erase_backing_disk': erase_backing_disk},
                                schema)
        except jsonschema.exceptions.ValidationError as exc:
            raise InvalidConfigError(exc)
        # NOTE(review): swap_size is assumed to always match this pattern
        # once schema validation has passed, so 'm' is not checked for None
        m = re.match(r'(\d+)(%|GiB)', swap_size)
        swap_size_value = int(m.group(1))
        swap_type = m.group(2)
        log.trace('swap_size_value = %s; swap_type = %s',
                  swap_size_value, swap_type)
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        host_cache = __salt__['vsphere.get_host_cache'](service_instance=si)

        # Check enabled
        if host_cache['enabled'] != enabled:
            changes.update({'enabled': {'old': host_cache['enabled'],
                                        'new': enabled}})
            needs_setting = True

        # Check datastores
        existing_datastores = None
        if host_cache.get('datastore'):
            existing_datastores = \
                __salt__['vsphere.list_datastores_via_proxy'](
                    datastore_names=[datastore['name']],
                    service_instance=si)
        # Retrieve backing disks
        existing_disks = __salt__['vsphere.list_disks'](
            scsi_addresses=[datastore['backing_disk_scsi_addr']],
            service_instance=si)
        if not existing_disks:
            raise VMwareObjectRetrievalError(
                'Disk with scsi address \'{0}\' was not found in host \'{1}\''
                ''.format(datastore['backing_disk_scsi_addr'], hostname))
        backing_disk = existing_disks[0]
        backing_disk_display = '{0} (id:{1})'.format(
            backing_disk['scsi_address'], backing_disk['id'])
        log.trace('backing_disk = %s', backing_disk_display)

        existing_datastore = None
        if not existing_datastores:
            # The host cache datastore doesn't exist yet: (optionally) erase
            # the backing disk, then create the datastore

            # Check if disk needs to be erased
            if erase_backing_disk:
                if __opts__['test']:
                    comments.append('State {0} will erase '
                                    'the backing disk \'{1}\' on host \'{2}\'.'
                                    ''.format(name, backing_disk_display,
                                              hostname))
                    log.info(comments[-1])
                else:
                    # Erase disk
                    __salt__['vsphere.erase_disk_partitions'](
                        disk_id=backing_disk['id'], service_instance=si)
                    comments.append('Erased backing disk \'{0}\' on host '
                                    '\'{1}\'.'.format(backing_disk_display,
                                                      hostname))
                    log.info(comments[-1])
            # Create the datastore
            if __opts__['test']:
                comments.append('State {0} will create '
                                'the datastore \'{1}\', with backing disk '
                                '\'{2}\', on host \'{3}\'.'
                                ''.format(name, datastore['name'],
                                          backing_disk_display, hostname))
                log.info(comments[-1])
            else:
                if dedicated_backing_disk:
                    # Check backing disk doesn't already have partitions
                    partitions = __salt__['vsphere.list_disk_partitions'](
                        disk_id=backing_disk['id'], service_instance=si)
                    log.trace('partitions = %s', partitions)
                    # We will ignore the mbr partitions
                    non_mbr_partitions = [p for p in partitions
                                          if p['format'] != 'mbr']
                    if non_mbr_partitions:
                        raise VMwareApiError(
                            'Backing disk \'{0}\' has unexpected partitions'
                            ''.format(backing_disk_display))
                __salt__['vsphere.create_vmfs_datastore'](
                    datastore['name'], existing_disks[0]['id'],
                    datastore['vmfs_version'], service_instance=si)
                comments.append('Created vmfs datastore \'{0}\', backed by '
                                'disk \'{1}\', on host \'{2}\'.'
                                ''.format(datastore['name'],
                                          backing_disk_display, hostname))
                log.info(comments[-1])
                changes.update(
                    {'datastore':
                     {'new': {'name': datastore['name'],
                              'backing_disk': backing_disk_display}}})
                # Re-list so the capacity of the newly created datastore can
                # be used in the swap size calculations below
                existing_datastore = \
                    __salt__['vsphere.list_datastores_via_proxy'](
                        datastore_names=[datastore['name']],
                        service_instance=si)[0]
            needs_setting = True
        else:
            # Check datastore is backed by the correct disk
            if not existing_datastores[0].get('backing_disk_ids'):
                raise VMwareSaltError('Datastore \'{0}\' doesn\'t have a '
                                      'backing disk'
                                      ''.format(datastore['name']))
            if backing_disk['id'] not in \
               existing_datastores[0]['backing_disk_ids']:

                raise VMwareSaltError(
                    'Datastore \'{0}\' is not backed by the correct disk: '
                    'expected \'{1}\'; got {2}'
                    ''.format(
                        datastore['name'], backing_disk['id'],
                        ', '.join(
                            ['\'{0}\''.format(disk) for disk in
                             existing_datastores[0]['backing_disk_ids']])))

            comments.append('Datastore \'{0}\' already exists on host \'{1}\' '
                            'and is backed by disk \'{2}\'. Nothing to be '
                            'done.'.format(datastore['name'], hostname,
                                           backing_disk_display))
            existing_datastore = existing_datastores[0]
            log.trace('existing_datastore = %s', existing_datastore)
            log.info(comments[-1])

        if existing_datastore:
            # The following comparisons can be done if the existing_datastore
            # is set; it may not be set if running in test mode
            #
            # We support percent, as well as MiB, we will convert the size
            # to MiB, multiples of 1024 (VMware SDK limitation)
            if swap_type == '%':
                # Percentage swap size
                # Convert from bytes to MiB
                raw_size_MiB = (swap_size_value/100.0) * \
                    (existing_datastore['capacity']/1024/1024)
            else:
                raw_size_MiB = swap_size_value * 1024
            log.trace('raw_size = %sMiB', raw_size_MiB)
            # Round down to the nearest multiple of 1024 MiB
            swap_size_MiB = int(raw_size_MiB/1024)*1024
            log.trace('adjusted swap_size = %sMiB', swap_size_MiB)
            existing_swap_size_MiB = 0
            # NOTE(review): vsphere.get_host_cache appears to report
            # swap_size as '<n>MiB' — the regex silently yields 0 MiB for
            # any other format; confirm against the execution module
            m = re.match(r'(\d+)MiB', host_cache.get('swap_size')) if \
                host_cache.get('swap_size') else None
            if m:
                # if swap_size from the host is set and has an expected value
                # we are going to parse it to get the number of MiBs
                existing_swap_size_MiB = int(m.group(1))
            if not existing_swap_size_MiB == swap_size_MiB:
                needs_setting = True
                changes.update(
                    {'swap_size':
                     {'old': '{}GiB'.format(existing_swap_size_MiB/1024),
                      'new': '{}GiB'.format(swap_size_MiB/1024)}})

        if needs_setting:
            if __opts__['test']:
                comments.append('State {0} will configure '
                                'the host cache on host \'{1}\' to: {2}.'
                                ''.format(name, hostname,
                                          {'enabled': enabled,
                                           'datastore_name':
                                           datastore['name'],
                                           'swap_size': swap_size}))
            else:
                # Guard against a swap size larger than the datastore
                # (capacity is in bytes; compare in MiB)
                if (existing_datastore['capacity'] / 1024.0**2) < \
                   swap_size_MiB:

                    raise ArgumentValueError(
                        'Capacity of host cache datastore \'{0}\' ({1} MiB) is '
                        'smaller than the required swap size ({2} MiB)'
                        ''.format(existing_datastore['name'],
                                  existing_datastore['capacity'] / 1024.0**2,
                                  swap_size_MiB))
                __salt__['vsphere.configure_host_cache'](
                    enabled,
                    datastore['name'],
                    swap_size_MiB=swap_size_MiB,
                    service_instance=si)
                comments.append('Host cache configured on host '
                                '\'{0}\'.'.format(hostname))
        else:
            comments.append('Host cache on host \'{0}\' is already correctly '
                            'configured. Nothing to be done.'.format(hostname))
            result = True
        __salt__['vsphere.disconnect'](si)
        log.info(comments[-1])
        ret.update({'comment': '\n'.join(comments),
                    'result': result,
                    'changes': changes})
        return ret
    except CommandExecutionError as err:
        log.error('Error: %s.', err)
        if si:
            __salt__['vsphere.disconnect'](si)
        ret.update({
            'result': False if not __opts__['test'] else None,
            'comment': '{}.'.format(err)})
        return ret
Configures the host cache used for swapping. It will do the following: 1. Checks if the backing disk exists 2. Creates the VMFS datastore if it doesn't exist (datastore partition will be created and use the entire disk) 3. Raises an error if ``dedicated_backing_disk`` is ``True`` and partitions already exist on the backing disk 4. Configures host_cache to use a portion of the datastore for caching (either a specific size or a percentage of the datastore) Examples Percentage swap size (can't be 100%) .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': false 'swap_size': '98%', } Fixed-size swap size .. code:: python { 'enabled': true, 'datastore': { 'backing_disk_scsi_addr': 'vmhba0:C0:T0:L0', 'vmfs_version': 5, 'name': 'hostcache' } 'dedicated_backing_disk': true 'swap_size': '10GiB', } name Mandatory state name. enabled Specifies whether the host cache is enabled. datastore Specifies the host cache datastore. swap_size Specifies the size of the host cache swap. Can be a percentage or a value in GiB. Default value is ``100%``. dedicated_backing_disk Specifies whether the backing disk is dedicated to the host cache which means it must have no other partitions. Default is False. erase_backing_disk Specifies whether to erase all partitions on the backing disk before the datastore is created. Default value is False.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxi.py#L1305-L1594
[ "def serialize(cls, id_=None):\n # Get the initial serialization\n serialized = super(DefinitionsSchema, cls).serialize(id_)\n complex_items = []\n # Augment the serializations with the definitions of all complex items\n aux_items = cls._items.values()\n\n # Convert dict_view object to a list on Python 3\n if six.PY3:\n aux_items = list(aux_items)\n\n while aux_items:\n item = aux_items.pop(0)\n # Add complex attributes\n if isinstance(item, ComplexSchemaItem):\n complex_items.append(item)\n aux_items.extend(item.get_complex_attrs())\n\n # Handle container items\n if isinstance(item, OneOfItem):\n aux_items.extend(item.items)\n elif isinstance(item, ArrayItem):\n aux_items.append(item.items)\n elif isinstance(item, DictItem):\n if item.properties:\n aux_items.extend(item.properties.values())\n if item.additional_properties and \\\n isinstance(item.additional_properties, SchemaItem):\n\n aux_items.append(item.additional_properties)\n\n definitions = OrderedDict()\n for config in complex_items:\n if isinstance(config, ComplexSchemaItem):\n definitions[config.definition_name] = \\\n config.get_definition()\n serialized['definitions'] = definitions\n return serialized\n" ]
# -*- coding: utf-8 -*- ''' Manage VMware ESXi Hosts. .. versionadded:: 2015.8.4 Dependencies ============ - pyVmomi Python Module - ESXCLI pyVmomi ------- PyVmomi can be installed via pip: .. code-block:: bash pip install pyVmomi .. note:: Version 6.0 of pyVmomi has some problems with SSL error handling on certain versions of Python. If using version 6.0 of pyVmomi, Python 2.6, Python 2.7.9, or newer must be present. This is due to an upstream dependency in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the version of Python is not in the supported range, you will need to install an earlier version of pyVmomi. See `Issue #29537`_ for more information. .. _Issue #29537: https://github.com/saltstack/salt/issues/29537 Based on the note above, to install an earlier version of pyVmomi than the version currently listed in PyPi, run the following: .. code-block:: bash pip install pyVmomi==5.5.0.2014.1.1 The 5.5.0.2014.1.1 is a known stable version that this original ESXi State Module was developed against. ESXCLI ------ Currently, about a third of the functions used in the vSphere Execution Module require the ESXCLI package be installed on the machine running the Proxy Minion process. The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware provides vCLI package installation instructions for `vSphere 5.5`_ and `vSphere 6.0`_. .. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html .. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html Once all of the required dependencies are in place and the vCLI package is installed, you can check to see if you can connect to your ESXi host or vCenter server by running the following command: .. code-block:: bash esxcli -s <host-location> -u <username> -p <password> system syslog config get If the connection was successful, ESXCLI was successfully installed on your system. 
You should see output related to the ESXi host's syslog configuration. .. note:: Be aware that some functionality in this state module may depend on the type of license attached to the ESXi host. For example, certain services are only available to manipulate service state or policies with a VMware vSphere Enterprise or Enterprise Plus license, while others are available with a Standard license. The ``ntpd`` service is restricted to an Enterprise Plus license, while ``ssh`` is available via the Standard license. Please see the `vSphere Comparison`_ page for more information. .. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare About ----- This state module was written to be used in conjunction with Salt's :mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's ESXi Proxy Minion, please refer to the :ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for configuration examples, dependency installation instructions, how to run remote execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state example. 
''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import sys import re # Import Salt Libs from salt.ext import six import salt.utils.files from salt.exceptions import CommandExecutionError, InvalidConfigError, \ VMwareObjectRetrievalError, VMwareSaltError, VMwareApiError, \ ArgumentValueError from salt.utils.decorators import depends from salt.config.schemas.esxi import DiskGroupsDiskScsiAddressSchema, \ HostCacheSchema # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Get Logging Started log = logging.getLogger(__name__) try: from pyVmomi import VmomiSupport # We check the supported vim versions to infer the pyVmomi version if 'vim25/6.0' in VmomiSupport.versionMap and \ sys.version_info > (2, 7) and sys.version_info < (2, 7, 9): log.debug('pyVmomi not loaded: Incompatible versions ' 'of Python. See Issue #29537.') raise ImportError() HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False def __virtual__(): return 'esxi.cmd' in __salt__ def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500): ''' Ensures a host's core dump configuration. name Name of the state. enabled Sets whether or not ESXi core dump collection should be enabled. This is a boolean value set to ``True`` or ``False`` to enable or disable core dumps. Note that ESXi requires that the core dump must be enabled before any other parameters may be set. This also affects the ``changes`` results in the state return dictionary. If ``enabled`` is ``False``, we can't obtain any previous settings to compare other state variables, resulting in many ``old`` references returning ``None``. Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons will be more accurate. This is due to the way the system coredemp network configuration command returns data. dump_ip The IP address of host that will accept the dump. 
host_vnic Host VNic port through which to communicate. Defaults to ``vmk0``. dump_port TCP port to use for the dump. Defaults to ``6500``. Example: .. code-block:: yaml configure-host-coredump: esxi.coredump_configured: - enabled: True - dump_ip: 'my-coredump-ip.example.com' ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' enabled_msg = 'ESXi requires that the core dump must be enabled ' \ 'before any other parameters may be set.' host = __pillar__['proxy']['host'] current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host) error = current_config.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_config = current_config.get('Coredump Config') current_enabled = current_config.get('enabled') # Configure coredump enabled state, if there are changes. if current_enabled != enabled: enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}} # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('coredump_network_enable', enabled=enabled).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Allow users to disable core dump, but then return since # nothing else can be set if core dump is disabled. if not enabled: ret['result'] = True ret['comment'] = enabled_msg ret['changes'].update(enabled_changes) return ret ret['changes'].update(enabled_changes) elif not enabled: # If current_enabled and enabled match, but are both False, # We must return before configuring anything. This isn't a # failure as core dump may be disabled intentionally. ret['result'] = True ret['comment'] = enabled_msg return ret # Test for changes with all remaining configurations. The changes flag is used # To detect changes, and then set_coredump_network_config is called one time. 
changes = False current_ip = current_config.get('ip') if current_ip != dump_ip: ret['changes'].update({'dump_ip': {'old': current_ip, 'new': dump_ip}}) changes = True current_vnic = current_config.get('host_vnic') if current_vnic != host_vnic: ret['changes'].update({'host_vnic': {'old': current_vnic, 'new': host_vnic}}) changes = True current_port = current_config.get('port') if current_port != six.text_type(dump_port): ret['changes'].update({'dump_port': {'old': current_port, 'new': six.text_type(dump_port)}}) changes = True # Only run the command if not using test=True and changes were detected. if not __opts__['test'] and changes is True: response = __salt__[esxi_cmd]('set_coredump_network_config', dump_ip=dump_ip, host_vnic=host_vnic, dump_port=dump_port).get(host) if response.get('success') is False: msg = response.get('stderr') if not msg: msg = response.get('stdout') ret['comment'] = 'Error: {0}'.format(msg) return ret ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Core Dump configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Core dump configuration will change.' return ret def password_present(name, password): ''' Ensures the given password is set on the ESXi host. Passwords cannot be obtained from host, so if a password is set in this state, the ``vsphere.update_host_password`` function will always run (except when using test=True functionality) and the state's changes dictionary will always be populated. The username for which the password will change is the same username that is used to authenticate against the ESXi host via the Proxy Minion. For example, if the pillar definition for the proxy username is defined as ``root``, then the username that the password will be updated for via this state is ``root``. name Name of the state. password The new password to change on the host. Example: .. 
code-block:: yaml configure-host-password: esxi.password_present: - password: 'new-bad-password' ''' ret = {'name': name, 'result': True, 'changes': {'old': 'unknown', 'new': '********'}, 'comment': 'Host password was updated.'} esxi_cmd = 'esxi.cmd' if __opts__['test']: ret['result'] = None ret['comment'] = 'Host password will change.' return ret else: try: __salt__[esxi_cmd]('update_host_password', new_password=password) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret return ret def ntp_configured(name, service_running, ntp_servers=None, service_policy=None, service_restart=False, update_datetime=False): ''' Ensures a host's NTP server configuration such as setting NTP servers, ensuring the NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host. name Name of the state. service_running Ensures the running state of the ntp daemon for the host. Boolean value where ``True`` indicates that ntpd should be running and ``False`` indicates that it should be stopped. ntp_servers A list of servers that should be added to the ESXi host's NTP configuration. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the ntp daemon will be restarted, regardless of its previous running state. Default is ``False``. update_datetime If set to ``True``, the date/time on the given host will be updated to UTC. Default setting is ``False``. This option should be used with caution since network delays and execution delays can result in time skews. Example: .. 
code-block:: yaml configure-host-ntp: esxi.ntp_configured: - service_running: True - ntp_servers: - 192.174.1.100 - 192.174.1.200 - service_policy: 'on' - service_restart: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ntpd = 'ntpd' ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host) ntp_running = __salt__[esxi_cmd]('get_service_running', service_name=ntpd).get(host) error = ntp_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ntp_running = ntp_running.get(ntpd) # Configure NTP Servers for the Host if ntp_servers and set(ntp_servers) != set(ntp_config): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_ntp_config', ntp_servers=ntp_servers).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Set changes dictionary for ntp_servers ret['changes'].update({'ntp_servers': {'old': ntp_config, 'new': ntp_servers}}) # Configure service_running state if service_running != ntp_running: # Only run the command if not using test=True if not __opts__['test']: # Start ntdp if service_running=True if ntp_running is True: response = __salt__[esxi_cmd]('service_start', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Stop ntpd if service_running=False else: response = __salt__[esxi_cmd]('service_stop', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ntp_running, 'new': service_running}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ntpd).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret 
current_service_policy = current_service_policy.get(ntpd) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ntpd, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Update datetime, if requested. if update_datetime: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('update_host_datetime').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'update_datetime': {'old': '', 'new': 'Host datetime was updated.'}}) # Restart ntp_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ntpd).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'NTP Daemon Restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'NTP is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'NTP state will change.' return ret def vmotion_configured(name, enabled, device='vmk0'): ''' Configures a host's VMotion properties such as enabling VMotion and setting the device VirtualNic that VMotion will use. name Name of the state. enabled Ensures whether or not VMotion should be enabled on a host as a boolean value where ``True`` indicates that VMotion should be enabled and ``False`` indicates that VMotion should be disabled. device The device that uniquely identifies the VirtualNic that will be used for VMotion for the host. Defaults to ``vmk0``. Example: .. 
code-block:: yaml configure-vmotion: esxi.vmotion_configured: - enabled: True - device: sample-device ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host) current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled') # Configure VMotion Enabled state, if changed. if enabled != current_vmotion_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VMotion if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vmotion_enable', device=device).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VMotion if enabled=False else: response = __salt__[esxi_cmd]('vmotion_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vmotion_enabled, 'new': enabled}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VMotion configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VMotion configuration will change.' return ret def vsan_configured(name, enabled, add_disks_to_vsan=False): ''' Configures a host's VSAN properties such as enabling or disabling VSAN, or adding VSAN-eligible disks to the VSAN system for the host. name Name of the state. enabled Ensures whether or not VSAN should be enabled on a host as a boolean value where ``True`` indicates that VSAN should be enabled and ``False`` indicates that VSAN should be disabled. add_disks_to_vsan If set to ``True``, any VSAN-eligible disks for the given host will be added to the host's VSAN system. Default is ``False``. Example: .. 
code-block:: yaml configure-host-vsan: esxi.vsan_configured: - enabled: True - add_disks_to_vsan: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host) error = current_vsan_enabled.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled') # Configure VSAN Enabled state, if changed. if enabled != current_vsan_enabled: # Only run the command if not using test=True if not __opts__['test']: # Enable VSAN if enabled=True if enabled is True: response = __salt__[esxi_cmd]('vsan_enable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable VSAN if enabled=False else: response = __salt__[esxi_cmd]('vsan_disable').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'enabled': {'old': current_vsan_enabled, 'new': enabled}}) # Add any eligible disks to VSAN, if requested. if add_disks_to_vsan: current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host) error = current_eligible_disks.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret disks = current_eligible_disks.get('Eligible') if disks and isinstance(disks, list): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('vsan_add_disks').get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'add_disks_to_vsan': {'old': '', 'new': disks}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'VSAN configuration is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'VSAN configuration will change.' 
return ret def ssh_configured(name, service_running, ssh_key=None, ssh_key_file=None, service_policy=None, service_restart=False, certificate_verify=False): ''' Manage the SSH configuration for a host including whether or not SSH is running or the presence of a given SSH key. Note: Only one ssh key can be uploaded for root. Uploading a second key will replace any existing key. name Name of the state. service_running Ensures whether or not the SSH service should be running on a host. Represented as a boolean value where ``True`` indicates that SSH should be running and ``False`` indicates that SSH should stopped. In order to update SSH keys, the SSH service must be running. ssh_key Public SSH key to added to the authorized_keys file on the ESXi host. You can use ``ssh_key`` or ``ssh_key_file``, but not both. ssh_key_file File containing the public SSH key to be added to the authorized_keys file on the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both. service_policy The policy to set for the NTP service. .. note:: When setting the service policy to ``off`` or ``on``, you *must* quote the setting. If you don't, the yaml parser will set the string to a boolean, which will cause trouble checking for stateful changes and will error when trying to set the policy on the ESXi host. service_restart If set to ``True``, the SSH service will be restarted, regardless of its previous running state. Default is ``False``. certificate_verify If set to ``True``, the SSL connection must present a valid certificate. Default is ``False``. Example: .. 
code-block:: yaml configure-host-ssh: esxi.ssh_configured: - service_running: True - ssh_key_file: /etc/salt/ssh_keys/my_key.pub - service_policy: 'on' - service_restart: True - certificate_verify: True ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] ssh = 'ssh' ssh_running = __salt__[esxi_cmd]('get_service_running', service_name=ssh).get(host) error = ssh_running.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ssh_running = ssh_running.get(ssh) # Configure SSH service_running state, if changed. if service_running != ssh_running: # Only actually run the command if not using test=True if not __opts__['test']: # Start SSH if service_running=True if service_running is True: enable = __salt__[esxi_cmd]('service_start', service_name=ssh).get(host) error = enable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret # Disable SSH if service_running=False else: disable = __salt__[esxi_cmd]('service_stop', service_name=ssh).get(host) error = disable.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_running': {'old': ssh_running, 'new': service_running}}) # If uploading an SSH key or SSH key file, see if there's a current # SSH key and compare the current key to the key set in the state. 
current_ssh_key, ssh_key_changed = None, False if ssh_key or ssh_key_file: current_ssh_key = __salt__[esxi_cmd]('get_ssh_key', certificate_verify=certificate_verify) error = current_ssh_key.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_ssh_key = current_ssh_key.get('key') if current_ssh_key: clean_current_key = _strip_key(current_ssh_key).split(' ') if not ssh_key: ssh_key = '' # Open ssh key file and read in contents to create one key string with salt.utils.files.fopen(ssh_key_file, 'r') as key_file: for line in key_file: if line.startswith('#'): # Commented line continue ssh_key = ssh_key + line clean_ssh_key = _strip_key(ssh_key).split(' ') # Check that the first two list items of clean key lists are equal. if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]: ssh_key_changed = True else: # If current_ssh_key is None, but we're setting a new key with # either ssh_key or ssh_key_file, then we need to flag the change. ssh_key_changed = True # Upload SSH key, if changed. 
if ssh_key_changed: if not __opts__['test']: # Upload key response = __salt__[esxi_cmd]('upload_ssh_key', ssh_key=ssh_key, ssh_key_file=ssh_key_file, certificate_verify=certificate_verify) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'SSH Key': {'old': current_ssh_key, 'new': ssh_key if ssh_key else ssh_key_file}}) # Configure service_policy if service_policy: current_service_policy = __salt__[esxi_cmd]('get_service_policy', service_name=ssh).get(host) error = current_service_policy.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_service_policy = current_service_policy.get(ssh) if service_policy != current_service_policy: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_service_policy', service_name=ssh, service_policy=service_policy).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_policy': {'old': current_service_policy, 'new': service_policy}}) # Restart ssh_service if service_restart=True if service_restart: # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('service_restart', service_name=ssh).get(host) error = response.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret ret['changes'].update({'service_restart': {'old': '', 'new': 'SSH service restarted.'}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'SSH service is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'SSH service state will change.' return ret def syslog_configured(name, syslog_configs, firewall=True, reset_service=True, reset_syslog_config=False, reset_configs=None): ''' Ensures the specified syslog configuration parameters. 
By default, this state will reset the syslog service after any new or changed parameters are set successfully. name Name of the state. syslog_configs Name of parameter to set (corresponds to the command line switch for esxcli without the double dashes (--)) Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``, ``default-rotate``, ``default-size``, and ``default-timeout``. Each syslog_config option also needs a configuration value to set. For example, ``loghost`` requires URLs or IP addresses to use for logging. Multiple log servers can be specified by listing them, comma-separated, but without spaces before or after commas (reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html) firewall Enable the firewall rule set for syslog. Defaults to ``True``. reset_service After a successful parameter set, reset the service. Defaults to ``True``. reset_syslog_config Resets the syslog service to it's default settings. Defaults to ``False``. If set to ``True``, default settings defined by the list of syslog configs in ``reset_configs`` will be reset before running any other syslog settings. reset_configs A comma-delimited list of parameters to reset. Only runs if ``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set to ``True``, but no syslog configs are listed in ``reset_configs``, then ``reset_configs`` will be set to ``all`` by default. See ``syslog_configs`` parameter above for a list of valid options. Example: .. 
code-block:: yaml configure-host-syslog: esxi.syslog_configured: - syslog_configs: loghost: ssl://localhost:5432,tcp://10.1.0.1:1514 default-timeout: 120 - firewall: True - reset_service: True - reset_syslog_config: True - reset_configs: loghost,default-timeout ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} esxi_cmd = 'esxi.cmd' host = __pillar__['proxy']['host'] if reset_syslog_config: if not reset_configs: reset_configs = 'all' # Only run the command if not using test=True if not __opts__['test']: reset = __salt__[esxi_cmd]('reset_syslog_config', syslog_config=reset_configs).get(host) for key, val in six.iteritems(reset): if isinstance(val, bool): continue if not val.get('success'): msg = val.get('message') if not msg: msg = 'There was an error resetting a syslog config \'{0}\'.' \ 'Please check debug logs.'.format(val) ret['comment'] = 'Error: {0}'.format(msg) return ret ret['changes'].update({'reset_syslog_config': {'old': '', 'new': reset_configs}}) current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host) error = current_firewall.get('Error') if error: ret['comment'] = 'Error: {0}'.format(error) return ret current_firewall = current_firewall.get('rulesets').get('syslog') if current_firewall != firewall: # Only run the command if not using test=True if not __opts__['test']: enabled = __salt__[esxi_cmd]('enable_firewall_ruleset', ruleset_enable=firewall, ruleset_name='syslog').get(host) if enabled.get('retcode') != 0: err = enabled.get('stderr') out = enabled.get('stdout') ret['comment'] = 'Error: {0}'.format(err if err else out) return ret ret['changes'].update({'firewall': {'old': current_firewall, 'new': firewall}}) current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host) for key, val in six.iteritems(syslog_configs): # The output of get_syslog_config has different keys than the keys # Used to set syslog_config values. We need to look them up first. 
try: lookup_key = _lookup_syslog_config(key) except KeyError: ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key) return ret current_val = current_syslog_config[lookup_key] if six.text_type(current_val) != six.text_type(val): # Only run the command if not using test=True if not __opts__['test']: response = __salt__[esxi_cmd]('set_syslog_config', syslog_config=key, config_value=val, firewall=firewall, reset_service=reset_service).get(host) success = response.get(key).get('success') if not success: msg = response.get(key).get('message') if not msg: msg = 'There was an error setting syslog config \'{0}\'. ' \ 'Please check debug logs.'.format(key) ret['comment'] = msg return ret if not ret['changes'].get('syslog_config'): ret['changes'].update({'syslog_config': {}}) ret['changes']['syslog_config'].update({key: {'old': current_val, 'new': val}}) ret['result'] = True if ret['changes'] == {}: ret['comment'] = 'Syslog is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Syslog state will change.' return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def diskgroups_configured(name, diskgroups, erase_disks=False): ''' Configures the disk groups to use for vsan. This function will do the following: 1. Check whether or not all disks in the diskgroup spec exist, and raises and errors if they do not. 2. Create diskgroups with the correct disk configurations if diskgroup (identified by the cache disk canonical name) doesn't exist 3. Adds extra capacity disks to the existing diskgroup Example: .. code:: python { 'cache_scsi_addr': 'vmhba1:C0:T0:L0', 'capacity_scsi_addrs': [ 'vmhba2:C0:T0:L0', 'vmhba3:C0:T0:L0', 'vmhba4:C0:T0:L0', ] } name Mandatory state name diskgroups Disk group representation containing scsi disk addresses. Scsi addresses are expected for disks in the diskgroup: erase_disks Specifies whether to erase all partitions on all disks member of the disk group before the disk group is created. 
Default value is False. ''' proxy_details = __salt__['esxi.get_details']() hostname = proxy_details['host'] if not proxy_details.get('vcenter') \ else proxy_details['esxi_host'] log.info('Running state %s for host \'%s\'', name, hostname) # Variable used to return the result of the invocation ret = {'name': name, 'result': None, 'changes': {}, 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required changes = False comments = [] diskgroup_changes = {} si = None try: log.trace('Validating diskgroups_configured input') schema = DiskGroupsDiskScsiAddressSchema.serialize() try: jsonschema.validate({'diskgroups': diskgroups, 'erase_disks': erase_disks}, schema) except jsonschema.exceptions.ValidationError as exc: raise InvalidConfigError(exc) si = __salt__['vsphere.get_service_instance_via_proxy']() host_disks = __salt__['vsphere.list_disks'](service_instance=si) if not host_disks: raise VMwareObjectRetrievalError( 'No disks retrieved from host \'{0}\''.format(hostname)) scsi_addr_to_disk_map = {d['scsi_address']: d for d in host_disks} log.trace('scsi_addr_to_disk_map = %s', scsi_addr_to_disk_map) existing_diskgroups = \ __salt__['vsphere.list_diskgroups'](service_instance=si) cache_disk_to_existing_diskgroup_map = \ {dg['cache_disk']: dg for dg in existing_diskgroups} except CommandExecutionError as err: log.error('Error: %s', err) if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False if not __opts__['test'] else None, 'comment': six.text_type(err)}) return ret # Iterate through all of the disk groups for idx, dg in enumerate(diskgroups): # Check for cache disk if not dg['cache_scsi_addr'] in scsi_addr_to_disk_map: comments.append('No cache disk with scsi address \'{0}\' was ' 'found.'.format(dg['cache_scsi_addr'])) log.error(comments[-1]) errors = True continue # Check for capacity disks cache_disk_id = scsi_addr_to_disk_map[dg['cache_scsi_addr']]['id'] cache_disk_display = '{0} 
(id:{1})'.format(dg['cache_scsi_addr'], cache_disk_id) bad_scsi_addrs = [] capacity_disk_ids = [] capacity_disk_displays = [] for scsi_addr in dg['capacity_scsi_addrs']: if scsi_addr not in scsi_addr_to_disk_map: bad_scsi_addrs.append(scsi_addr) continue capacity_disk_ids.append(scsi_addr_to_disk_map[scsi_addr]['id']) capacity_disk_displays.append( '{0} (id:{1})'.format(scsi_addr, capacity_disk_ids[-1])) if bad_scsi_addrs: comments.append('Error in diskgroup #{0}: capacity disks with ' 'scsi addresses {1} were not found.' ''.format(idx, ', '.join(['\'{0}\''.format(a) for a in bad_scsi_addrs]))) log.error(comments[-1]) errors = True continue if not cache_disk_to_existing_diskgroup_map.get(cache_disk_id): # A new diskgroup needs to be created log.trace('erase_disks = %s', erase_disks) if erase_disks: if __opts__['test']: comments.append('State {0} will ' 'erase all disks of disk group #{1}; ' 'cache disk: \'{2}\', ' 'capacity disk(s): {3}.' ''.format(name, idx, cache_disk_display, ', '.join( ['\'{}\''.format(a) for a in capacity_disk_displays]))) else: # Erase disk group disks for disk_id in [cache_disk_id] + capacity_disk_ids: __salt__['vsphere.erase_disk_partitions']( disk_id=disk_id, service_instance=si) comments.append('Erased disks of diskgroup #{0}; ' 'cache disk: \'{1}\', capacity disk(s): ' '{2}'.format( idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) if __opts__['test']: comments.append('State {0} will create ' 'the disk group #{1}; cache disk: \'{2}\', ' 'capacity disk(s): {3}.' 
.format(name, idx, cache_disk_display, ', '.join(['\'{0}\''.format(a) for a in capacity_disk_displays]))) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.create_diskgroup'](cache_disk_id, capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error creating disk group #{0}: ' '{1}.'.format(idx, err)) log.error(comments[-1]) errors = True continue comments.append('Created disk group #\'{0}\'.'.format(idx)) log.info(comments[-1]) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}} changes = True continue # The diskgroup exists; checking the capacity disks log.debug('Disk group #%s exists. Checking capacity disks: %s.', idx, capacity_disk_displays) existing_diskgroup = \ cache_disk_to_existing_diskgroup_map.get(cache_disk_id) existing_capacity_disk_displays = \ ['{0} (id:{1})'.format([d['scsi_address'] for d in host_disks if d['id'] == disk_id][0], disk_id) for disk_id in existing_diskgroup['capacity_disks']] # Populate added disks and removed disks and their displays added_capacity_disk_ids = [] added_capacity_disk_displays = [] removed_capacity_disk_ids = [] removed_capacity_disk_displays = [] for disk_id in capacity_disk_ids: if disk_id not in existing_diskgroup['capacity_disks']: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] added_capacity_disk_ids.append(disk_id) added_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) for disk_id in existing_diskgroup['capacity_disks']: if disk_id not in capacity_disk_ids: disk_scsi_addr = [d['scsi_address'] for d in host_disks if d['id'] == disk_id][0] removed_capacity_disk_ids.append(disk_id) removed_capacity_disk_displays.append( '{0} (id:{1})'.format(disk_scsi_addr, disk_id)) log.debug('Disk group #%s: existing capacity disk ids: %s; added ' 'capacity disk ids: %s; removed capacity disk ids: %s', idx, 
existing_capacity_disk_displays, added_capacity_disk_displays, removed_capacity_disk_displays) #TODO revisit this when removing capacity disks is supported if removed_capacity_disk_ids: comments.append( 'Error removing capacity disk(s) {0} from disk group #{1}; ' 'operation is not supported.' ''.format(', '.join(['\'{0}\''.format(id) for id in removed_capacity_disk_displays]), idx)) log.error(comments[-1]) errors = True continue if added_capacity_disk_ids: # Capacity disks need to be added to disk group # Building a string representation of the capacity disks # that need to be added s = ', '.join(['\'{0}\''.format(id) for id in added_capacity_disk_displays]) if __opts__['test']: comments.append('State {0} will add ' 'capacity disk(s) {1} to disk group #{2}.' ''.format(name, s, idx)) log.info(comments[-1]) changes = True continue try: __salt__['vsphere.add_capacity_to_diskgroup']( cache_disk_id, added_capacity_disk_ids, safety_checks=False, service_instance=si) except VMwareSaltError as err: comments.append('Error adding capacity disk(s) {0} to ' 'disk group #{1}: {2}.'.format(s, idx, err)) log.error(comments[-1]) errors = True continue com = ('Added capacity disk(s) {0} to disk group #{1}' ''.format(s, idx)) log.info(com) comments.append(com) diskgroup_changes[six.text_type(idx)] = \ {'new': {'cache': cache_disk_display, 'capacity': capacity_disk_displays}, 'old': {'cache': cache_disk_display, 'capacity': existing_capacity_disk_displays}} changes = True continue # No capacity needs to be added s = ('Disk group #{0} is correctly configured. Nothing to be done.' 
''.format(idx)) log.info(s) comments.append(s) __salt__['vsphere.disconnect'](si) #Build the final return message result = (True if not (changes or errors) else # no changes/errors None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, 'comment': '\n'.join(comments), 'changes': diskgroup_changes}) return ret @depends(HAS_PYVMOMI) @depends(HAS_JSONSCHEMA) def _lookup_syslog_config(config): ''' Helper function that looks up syslog_config keys available from ``vsphere.get_syslog_config``. ''' lookup = {'default-timeout': 'Default Network Retry Timeout', 'logdir': 'Local Log Output', 'default-size': 'Local Logging Default Rotation Size', 'logdir-unique': 'Log To Unique Subdirectory', 'default-rotate': 'Local Logging Default Rotations', 'loghost': 'Remote Host'} return lookup.get(config) def _strip_key(key_string): ''' Strips an SSH key string of white space and line endings and returns the new string. key_string The string to be stripped. ''' key_string.strip() key_string.replace('\n', '') key_string.replace('\r\n', '') return key_string
saltstack/salt
salt/states/cimc.py
hostname
python
def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret
Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L46-L100
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." 
else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." 
log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. 
code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. 
code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." ret['result'] = True return ret def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. 
This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
saltstack/salt
salt/states/cimc.py
logging_levels
python
def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret
Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L103-L165
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. 
servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. 
policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." 
return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." 
log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." ret['result'] = True return ret def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." 
return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
saltstack/salt
salt/states/cimc.py
ntp
python
def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret
Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L168-L247
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. 
versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. 
delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. 
name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." 
ret['result'] = True return ret def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
saltstack/salt
salt/states/cimc.py
power_configuration
python
def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." 
return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret
Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L250-L348
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. 
versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. 
code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. 
code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." ret['result'] = True return ret def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. 
This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
saltstack/salt
salt/states/cimc.py
syslog
python
def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." 
log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." ret['result'] = True return ret
Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L351-L434
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. 
versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. 
code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. 
This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." 
else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
saltstack/salt
salt/states/cimc.py
user
python
def user(name, id='', user='', priv='', password='', status='active'): ''' Ensures that a user is configured on the device. Due to being unable to verify the user password. This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active ''' ret = _default_ret(name) user_conf = __salt__['cimc.get_users']() try: for entry in user_conf['outConfigs']['aaaUser']: if entry['id'] == str(id): conf = entry if not conf: ret['result'] = False ret['comment'] = "Unable to find requested user id on device. Please verify id is valid." return ret updates = __salt__['cimc.set_user'](str(id), user, password, priv, status) if 'outConfig' in updates: ret['changes']['before'] = conf ret['changes']['after'] = updates['outConfig']['aaaUser'] ret['comment'] = "User settings modified." else: ret['result'] = False ret['comment'] = "Error setting user configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting user configuration." log.error(err) return ret ret['result'] = True return ret
Ensures that a user is configured on the device. Due to being unable to verify the user password. This is a forced operation. .. versionadded:: 2019.2.0 name: The name of the module function to execute. id(int): The user ID slot on the device. user(str): The username of the user. priv(str): The privilege level of the user. password(str): The password of the user. status(str): The status of the user. Can be either active or inactive. SLS Example: .. code-block:: yaml user_configuration: cimc.user: - id: 11 - user: foo - priv: admin - password: mypassword - status: active
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cimc.py#L437-L503
[ "def _default_ret(name):\n '''\n Set the default response values.\n\n '''\n ret = {\n 'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ''\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' A state module to manage Cisco UCS chassis devices. :codeauthor: ``Spencer Ervin <spencer_ervin@hotmail.com>`` :maturity: new :depends: none :platform: unix About ===== This state module was designed to handle connections to a Cisco Unified Computing System (UCS) chassis. This module relies on the CIMC proxy module to interface with the device. .. seealso:: :py:mod:`CIMC Proxy Module <salt.proxy.cimc>` ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging log = logging.getLogger(__name__) def __virtual__(): return 'cimc.get_system_info' in __salt__ def _default_ret(name): ''' Set the default response values. ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '' } return ret def hostname(name, hostname=None): ''' Ensures that the hostname is set to the specified value. .. versionadded:: 2019.2.0 name: The name of the module function to execute. hostname(str): The hostname of the server. SLS Example: .. code-block:: yaml set_name: cimc.hostname: - hostname: foobar ''' ret = _default_ret(name) current_name = __salt__['cimc.get_hostname']() req_change = False try: if current_name != hostname: req_change = True if req_change: update = __salt__['cimc.set_hostname'](hostname) if not update: ret['result'] = False ret['comment'] = "Error setting hostname." return ret ret['changes']['before'] = current_name ret['changes']['after'] = hostname ret['comment'] = "Hostname modified." else: ret['comment'] = "Hostname already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting hostname." log.error(err) return ret ret['result'] = True return ret def logging_levels(name, remote=None, local=None): ''' Ensures that the logging levels are set on the device. The logging levels must match the following options: emergency, alert, critical, error, warning, notice, informational, debug. .. 
versionadded:: 2019.2.0 name: The name of the module function to execute. remote(str): The logging level for SYSLOG logs. local(str): The logging level for the local device. SLS Example: .. code-block:: yaml logging_levels: cimc.logging_levels: - remote: informational - local: notice ''' ret = _default_ret(name) syslog_conf = __salt__['cimc.get_syslog_settings']() req_change = False try: syslog_dict = syslog_conf['outConfigs']['commSyslog'][0] if remote and syslog_dict['remoteSeverity'] != remote: req_change = True elif local and syslog_dict['localSeverity'] != local: req_change = True if req_change: update = __salt__['cimc.set_logging_levels'](remote, local) if update['outConfig']['commSyslog'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting logging levels." return ret ret['changes']['before'] = syslog_conf ret['changes']['after'] = __salt__['cimc.get_syslog_settings']() ret['comment'] = "Logging level settings modified." else: ret['comment'] = "Logging level already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting logging level settings." log.error(err) return ret ret['result'] = True return ret def ntp(name, servers): ''' Ensures that the NTP servers are configured. Servers are provided as an individual string or list format. Only four NTP servers will be reviewed. Any entries past four will be ignored. name: The name of the module function to execute. servers(str, list): The IP address or FQDN of the NTP servers. SLS Example: .. 
code-block:: yaml ntp_configuration_list: cimc.ntp: - servers: - foo.bar.com - 10.10.10.10 ntp_configuration_str: cimc.ntp: - servers: foo.bar.com ''' ret = _default_ret(name) ntp_servers = ['', '', '', ''] # Parse our server arguments if isinstance(servers, list): i = 0 for x in servers: ntp_servers[i] = x i += 1 else: ntp_servers[0] = servers conf = __salt__['cimc.get_ntp']() # Check if our NTP configuration is already set req_change = False try: if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \ or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \ or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \ or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \ or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']: req_change = True except KeyError as err: ret['result'] = False ret['comment'] = "Unable to confirm current NTP settings." log.error(err) return ret if req_change: try: update = __salt__['cimc.set_ntp_server'](ntp_servers[0], ntp_servers[1], ntp_servers[2], ntp_servers[3]) if update['outConfig']['commNtpProvider'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting NTP configuration." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting NTP configuration." log.error(err) return ret ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_ntp']() ret['comment'] = "NTP settings modified." else: ret['comment'] = "NTP already configured. No changes required." ret['result'] = True return ret def power_configuration(name, policy=None, delayType=None, delayValue=None): ''' Ensures that the power configuration is configured on the system. This is only available on some C-Series servers. .. versionadded:: 2019.2.0 name: The name of the module function to execute. policy(str): The action to be taken when chassis power is restored after an unexpected power loss. 
This can be one of the following: reset: The server is allowed to boot up normally when power is restored. The server can restart immediately or, optionally, after a fixed or random delay. stay-off: The server remains off until it is manually restarted. last-state: The server restarts and the system attempts to restore any processes that were running before power was lost. delayType(str): If the selected policy is reset, the restart can be delayed with this option. This can be one of the following: fixed: The server restarts after a fixed delay. random: The server restarts after a random delay. delayValue(int): If a fixed delay is selected, once chassis power is restored and the Cisco IMC has finished rebooting, the system waits for the specified number of seconds before restarting the server. Enter an integer between 0 and 240. SLS Example: .. code-block:: yaml reset_power: cimc.power_configuration: - policy: reset - delayType: fixed - delayValue: 0 power_off: cimc.power_configuration: - policy: stay-off ''' ret = _default_ret(name) power_conf = __salt__['cimc.get_power_configuration']() req_change = False try: power_dict = power_conf['outConfigs']['biosVfResumeOnACPowerLoss'][0] if policy and power_dict['vpResumeOnACPowerLoss'] != policy: req_change = True elif policy == "reset": if power_dict['delayType'] != delayType: req_change = True elif power_dict['delayType'] == "fixed": if str(power_dict['delay']) != str(delayValue): req_change = True else: ret['result'] = False ret['comment'] = "The power policy must be specified." return ret if req_change: update = __salt__['cimc.set_power_configuration'](policy, delayType, delayValue) if update['outConfig']['biosVfResumeOnACPowerLoss'][0]['status'] != 'modified': ret['result'] = False ret['comment'] = "Error setting power configuration." return ret ret['changes']['before'] = power_conf ret['changes']['after'] = __salt__['cimc.get_power_configuration']() ret['comment'] = "Power settings modified." 
else: ret['comment'] = "Power settings already configured. No changes required." except Exception as err: ret['result'] = False ret['comment'] = "Error occurred setting power settings." log.error(err) return ret ret['result'] = True return ret def syslog(name, primary=None, secondary=None): ''' Ensures that the syslog servers are set to the specified values. A value of None will be ignored. name: The name of the module function to execute. primary(str): The IP address or FQDN of the primary syslog server. secondary(str): The IP address or FQDN of the secondary syslog server. SLS Example: .. code-block:: yaml syslog_configuration: cimc.syslog: - primary: 10.10.10.10 - secondary: foo.bar.com ''' ret = _default_ret(name) conf = __salt__['cimc.get_syslog']() req_change = False if primary: prim_change = True if 'outConfigs' in conf and 'commSyslogClient' in conf['outConfigs']: for entry in conf['outConfigs']['commSyslogClient']: if entry['name'] != 'primary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == primary: prim_change = False if prim_change: try: update = __salt__['cimc.set_syslog_server'](primary, "primary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting primary SYSLOG server." 
log.error(err) return ret if secondary: sec_change = True if 'outConfig' in conf and 'commSyslogClient' in conf['outConfig']: for entry in conf['outConfig']['commSyslogClient']: if entry['name'] != 'secondary': continue if entry['adminState'] == 'enabled' and entry['hostname'] == secondary: sec_change = False if sec_change: try: update = __salt__['cimc.set_syslog_server'](secondary, "secondary") if update['outConfig']['commSyslogClient'][0]['status'] == 'modified': req_change = True else: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." return ret except Exception as err: ret['result'] = False ret['comment'] = "Error setting secondary SYSLOG server." log.error(err) return ret if req_change: ret['changes']['before'] = conf ret['changes']['after'] = __salt__['cimc.get_syslog']() ret['comment'] = "SYSLOG settings modified." else: ret['comment'] = "SYSLOG already configured. No changes required." ret['result'] = True return ret
saltstack/salt
salt/proxy/esxcluster.py
init
python
def init(opts):
    '''
    This function gets called when the proxy starts up.
    For login the protocol and port are cached.

    opts
        The proxy minion opts dict. The proxy configuration is taken from
        ``opts['proxy']`` merged with the ``proxy`` pillar key (pillar wins).

    Returns ``True`` on success, ``False`` when no working credentials could
    be found for the ``userpass`` mechanism.

    Raises ``salt.exceptions.InvalidConfigError`` when the proxy
    configuration fails schema validation or misses mechanism-specific keys.
    '''
    log.debug('Initting esxcluster proxy module in process %s', os.getpid())
    log.debug('Validating esxcluster proxy input')
    schema = EsxclusterProxySchema.serialize()
    log.trace('schema = %s', schema)
    proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
    log.trace('proxy_conf = %s', proxy_conf)
    try:
        jsonschema.validate(proxy_conf, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise salt.exceptions.InvalidConfigError(exc)

    # Save mandatory fields in cache
    for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'):
        DETAILS[key] = proxy_conf[key]

    # Additional validation: each mechanism has its own set of required keys.
    if DETAILS['mechanism'] == 'userpass':
        if 'username' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'username\' key found in proxy config.')
        if 'passwords' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'passwords\' key found in proxy config.')
        for key in ('username', 'passwords'):
            DETAILS[key] = proxy_conf[key]
    else:
        if 'domain' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'domain\' key found in proxy config.')
        if 'principal' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'principal\' key found in proxy config.')
        for key in ('domain', 'principal'):
            DETAILS[key] = proxy_conf[key]

    # Save optional settings (None when absent; downstream code falls back
    # to defaults).
    DETAILS['protocol'] = proxy_conf.get('protocol')
    DETAILS['port'] = proxy_conf.get('port')

    # Test connection
    if DETAILS['mechanism'] == 'userpass':
        # Get the correct login details.
        # Typo fix in the log message below: 'mehchanism' -> 'mechanism'.
        log.debug('Retrieving credentials and testing vCenter connection for '
                  'mechanism \'userpass\'')
        try:
            username, password = find_credentials()
            DETAILS['password'] = password
        except salt.exceptions.SaltSystemExit as err:
            log.critical('Error: %s', err)
            return False
    return True
This function gets called when the proxy starts up. For login the protocol and port are cached.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/esxcluster.py#L197-L257
[ "def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):\n if strategy == 'smart':\n if renderer.split('|')[-1] == 'yamlex' or renderer.startswith('yamlex_'):\n strategy = 'aggregate'\n else:\n strategy = 'recurse'\n\n if strategy == 'list':\n merged = merge_list(obj_a, obj_b)\n elif strategy == 'recurse':\n merged = merge_recurse(obj_a, obj_b, merge_lists)\n elif strategy == 'aggregate':\n #: level = 1 merge at least root data\n merged = merge_aggregate(obj_a, obj_b)\n elif strategy == 'overwrite':\n merged = merge_overwrite(obj_a, obj_b, merge_lists)\n elif strategy == 'none':\n # If we do not want to merge, there is only one pillar passed, so we can safely use the default recurse,\n # we just do not want to log an error\n merged = merge_recurse(obj_a, obj_b)\n else:\n log.warning(\n 'Unknown merging strategy \\'%s\\', fallback to recurse',\n strategy\n )\n merged = merge_recurse(obj_a, obj_b)\n\n return merged\n", "def find_credentials():\n '''\n Cycle through all the possible credentials and return the first one that\n works.\n '''\n\n # if the username and password were already found don't fo though the\n # connection process again\n if 'username' in DETAILS and 'password' in DETAILS:\n return DETAILS['username'], DETAILS['password']\n\n passwords = DETAILS['passwords']\n for password in passwords:\n DETAILS['password'] = password\n if not __salt__['vsphere.test_vcenter_connection']():\n # We are unable to authenticate\n continue\n # If we have data returned from above, we've successfully authenticated.\n return DETAILS['username'], password\n # We've reached the end of the list without successfully authenticating.\n raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '\n 'incorrect credentials.')\n", "def serialize(cls, id_=None):\n # The order matters\n serialized = OrderedDict()\n if id_ is not None:\n # This is meant as a configuration section, sub json schema\n serialized['id'] = 
'{0}/{1}.json#'.format(BASE_SCHEMA_URL, id_)\n else:\n # Main configuration block, json schema\n serialized['$schema'] = 'http://json-schema.org/draft-04/schema#'\n if cls.title is not None:\n serialized['title'] = cls.title\n if cls.description is not None:\n if cls.description == cls.__doc__:\n serialized['description'] = textwrap.dedent(cls.description).strip()\n else:\n serialized['description'] = cls.description\n\n required = []\n ordering = []\n serialized['type'] = 'object'\n properties = OrderedDict()\n cls.after_items_update = []\n for name in cls._order: # pylint: disable=E1133\n skip_order = False\n item_name = None\n if name in cls._sections: # pylint: disable=E1135\n section = cls._sections[name]\n serialized_section = section.serialize(None if section.__flatten__ is True else name)\n if section.__flatten__ is True:\n # Flatten the configuration section into the parent\n # configuration\n properties.update(serialized_section['properties'])\n if 'x-ordering' in serialized_section:\n ordering.extend(serialized_section['x-ordering'])\n if 'required' in serialized_section:\n required.extend(serialized_section['required'])\n if hasattr(section, 'after_items_update'):\n cls.after_items_update.extend(section.after_items_update)\n skip_order = True\n else:\n # Store it as a configuration section\n properties[name] = serialized_section\n\n if name in cls._items: # pylint: disable=E1135\n config = cls._items[name]\n item_name = config.__item_name__ or name\n # Handle the configuration items defined in the class instance\n if config.__flatten__ is True:\n serialized_config = config.serialize()\n cls.after_items_update.append(serialized_config)\n skip_order = True\n else:\n properties[item_name] = config.serialize()\n\n if config.required:\n # If it's a required item, add it to the required list\n required.append(item_name)\n\n if skip_order is False:\n # Store the order of the item\n if item_name is not None:\n if item_name not in ordering:\n 
ordering.append(item_name)\n else:\n if name not in ordering:\n ordering.append(name)\n\n if properties:\n serialized['properties'] = properties\n\n # Update the serialized object with any items to include after properties.\n # Do not overwrite properties already existing in the serialized dict.\n if cls.after_items_update:\n after_items_update = {}\n for entry in cls.after_items_update:\n for name, data in six.iteritems(entry):\n if name in after_items_update:\n if isinstance(after_items_update[name], list):\n after_items_update[name].extend(data)\n else:\n after_items_update[name] = data\n if after_items_update:\n after_items_update.update(serialized)\n serialized = after_items_update\n\n if required:\n # Only include required if not empty\n serialized['required'] = required\n if ordering:\n # Only include ordering if not empty\n serialized['x-ordering'] = ordering\n serialized['additionalProperties'] = cls.__allow_additional_items__\n return serialized\n" ]
# -*- coding: utf-8 -*- ''' Proxy Minion interface module for managing VMWare ESXi clusters. Dependencies ============ - pyVmomi - jsonschema Configuration ============= To use this integration proxy module, please configure the following: Pillar ------ Proxy minions get their configuration from Salt's Pillar. This can now happen from the proxy's configuration file. Example pillars: ``userpass`` mechanism: .. code-block:: yaml proxy: proxytype: esxcluster cluster: <cluster name> datacenter: <datacenter name> vcenter: <ip or dns name of parent vcenter> mechanism: userpass username: <vCenter username> passwords: (required if userpass is used) - first_password - second_password - third_password ``sspi`` mechanism: .. code-block:: yaml proxy: proxytype: esxcluster cluster: <cluster name> datacenter: <datacenter name> vcenter: <ip or dns name of parent vcenter> mechanism: sspi domain: <user domain> principal: <host kerberos principal> proxytype ^^^^^^^^^ To use this Proxy Module, set this to ``esxdatacenter``. cluster ^^^^^^^ Name of the managed cluster. Required. datacenter ^^^^^^^^^^ Name of the datacenter the managed cluster is in. Required. vcenter ^^^^^^^ The location of the VMware vCenter server (host of ip) where the datacenter should be managed. Required. mechanism ^^^^^^^^ The mechanism used to connect to the vCenter server. Supported values are ``userpass`` and ``sspi``. Required. Note: Connections are attempted using all (``username``, ``password``) combinations on proxy startup. username ^^^^^^^^ The username used to login to the host, such as ``root``. Required if mechanism is ``userpass``. passwords ^^^^^^^^^ A list of passwords to be used to try and login to the vCenter server. At least one password in this list is required if mechanism is ``userpass``. When the proxy comes up, it will try the passwords listed in order. domain ^^^^^^ User domain. Required if mechanism is ``sspi``. principal ^^^^^^^^ Kerberos principal. Rquired if mechanism is ``sspi``. 
protocol ^^^^^^^^ If the ESXi host is not using the default protocol, set this value to an alternate protocol. Default is ``https``. port ^^^^ If the ESXi host is not using the default port, set this value to an alternate port. Default is ``443``. Salt Proxy ---------- After your pillar is in place, you can test the proxy. The proxy can run on any machine that has network connectivity to your Salt Master and to the vCenter server in the pillar. SaltStack recommends that the machine running the salt-proxy process also run a regular minion, though it is not strictly necessary. To start a proxy minion one needs to establish its identity <id>: .. code-block:: bash salt-proxy --proxyid <proxy_id> On the machine that will run the proxy, make sure there is a configuration file present. By default this is ``/etc/salt/proxy``. If in a different location, the ``<configuration_folder>`` has to be specified when running the proxy: file with at least the following in it: .. code-block:: bash salt-proxy --proxyid <proxy_id> -c <configuration_folder> Commands -------- Once the proxy is running it will connect back to the specified master and individual commands can be runs against it: .. code-block:: bash # Master - minion communication salt <cluster_name> test.ping # Test vcenter connection salt <cluster_name> vsphere.test_vcenter_connection States ------ Associated states are documented in :mod:`salt.states.esxcluster </ref/states/all/salt.states.esxcluster>`. Look there to find an example structure for Pillar as well as an example ``.sls`` file for configuring an ESX cluster from scratch. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import os # Import Salt Libs import salt.exceptions from salt.config.schemas.esxcluster import EsxclusterProxySchema from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. 
__proxyenabled__ = ['esxcluster'] # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'esxcluster' def __virtual__(): ''' Only load if the vsphere execution module is available. ''' if HAS_JSONSCHEMA: return __virtualname__ return False, 'The esxcluster proxy module did not load.' def ping(): ''' Returns True. CLI Example: .. code-block:: bash salt esx-cluster test.ping ''' return True def shutdown(): ''' Shutdown the connection to the proxy device. For this proxy, shutdown is a no-op. ''' log.debug('esxcluster proxy shutdown() called...') def find_credentials(): ''' Cycle through all the possible credentials and return the first one that works. ''' # if the username and password were already found don't fo though the # connection process again if 'username' in DETAILS and 'password' in DETAILS: return DETAILS['username'], DETAILS['password'] passwords = DETAILS['passwords'] for password in passwords: DETAILS['password'] = password if not __salt__['vsphere.test_vcenter_connection'](): # We are unable to authenticate continue # If we have data returned from above, we've successfully authenticated. return DETAILS['username'], password # We've reached the end of the list without successfully authenticating. raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' 'incorrect credentials.') def get_details(): ''' Function that returns the cached details ''' return DETAILS
saltstack/salt
salt/proxy/esxcluster.py
find_credentials
python
def find_credentials(): ''' Cycle through all the possible credentials and return the first one that works. ''' # if the username and password were already found don't fo though the # connection process again if 'username' in DETAILS and 'password' in DETAILS: return DETAILS['username'], DETAILS['password'] passwords = DETAILS['passwords'] for password in passwords: DETAILS['password'] = password if not __salt__['vsphere.test_vcenter_connection'](): # We are unable to authenticate continue # If we have data returned from above, we've successfully authenticated. return DETAILS['username'], password # We've reached the end of the list without successfully authenticating. raise salt.exceptions.VMwareConnectionError('Cannot complete login due to ' 'incorrect credentials.')
Cycle through all the possible credentials and return the first one that works.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/esxcluster.py#L281-L302
null
# -*- coding: utf-8 -*- ''' Proxy Minion interface module for managing VMWare ESXi clusters. Dependencies ============ - pyVmomi - jsonschema Configuration ============= To use this integration proxy module, please configure the following: Pillar ------ Proxy minions get their configuration from Salt's Pillar. This can now happen from the proxy's configuration file. Example pillars: ``userpass`` mechanism: .. code-block:: yaml proxy: proxytype: esxcluster cluster: <cluster name> datacenter: <datacenter name> vcenter: <ip or dns name of parent vcenter> mechanism: userpass username: <vCenter username> passwords: (required if userpass is used) - first_password - second_password - third_password ``sspi`` mechanism: .. code-block:: yaml proxy: proxytype: esxcluster cluster: <cluster name> datacenter: <datacenter name> vcenter: <ip or dns name of parent vcenter> mechanism: sspi domain: <user domain> principal: <host kerberos principal> proxytype ^^^^^^^^^ To use this Proxy Module, set this to ``esxdatacenter``. cluster ^^^^^^^ Name of the managed cluster. Required. datacenter ^^^^^^^^^^ Name of the datacenter the managed cluster is in. Required. vcenter ^^^^^^^ The location of the VMware vCenter server (host of ip) where the datacenter should be managed. Required. mechanism ^^^^^^^^ The mechanism used to connect to the vCenter server. Supported values are ``userpass`` and ``sspi``. Required. Note: Connections are attempted using all (``username``, ``password``) combinations on proxy startup. username ^^^^^^^^ The username used to login to the host, such as ``root``. Required if mechanism is ``userpass``. passwords ^^^^^^^^^ A list of passwords to be used to try and login to the vCenter server. At least one password in this list is required if mechanism is ``userpass``. When the proxy comes up, it will try the passwords listed in order. domain ^^^^^^ User domain. Required if mechanism is ``sspi``. principal ^^^^^^^^ Kerberos principal. Rquired if mechanism is ``sspi``. 
protocol ^^^^^^^^ If the ESXi host is not using the default protocol, set this value to an alternate protocol. Default is ``https``. port ^^^^ If the ESXi host is not using the default port, set this value to an alternate port. Default is ``443``. Salt Proxy ---------- After your pillar is in place, you can test the proxy. The proxy can run on any machine that has network connectivity to your Salt Master and to the vCenter server in the pillar. SaltStack recommends that the machine running the salt-proxy process also run a regular minion, though it is not strictly necessary. To start a proxy minion one needs to establish its identity <id>: .. code-block:: bash salt-proxy --proxyid <proxy_id> On the machine that will run the proxy, make sure there is a configuration file present. By default this is ``/etc/salt/proxy``. If in a different location, the ``<configuration_folder>`` has to be specified when running the proxy: file with at least the following in it: .. code-block:: bash salt-proxy --proxyid <proxy_id> -c <configuration_folder> Commands -------- Once the proxy is running it will connect back to the specified master and individual commands can be runs against it: .. code-block:: bash # Master - minion communication salt <cluster_name> test.ping # Test vcenter connection salt <cluster_name> vsphere.test_vcenter_connection States ------ Associated states are documented in :mod:`salt.states.esxcluster </ref/states/all/salt.states.esxcluster>`. Look there to find an example structure for Pillar as well as an example ``.sls`` file for configuring an ESX cluster from scratch. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging import os # Import Salt Libs import salt.exceptions from salt.config.schemas.esxcluster import EsxclusterProxySchema from salt.utils.dictupdate import merge # This must be present or the Salt loader won't load this module. 
__proxyenabled__ = ['esxcluster'] # External libraries try: import jsonschema HAS_JSONSCHEMA = True except ImportError: HAS_JSONSCHEMA = False # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'esxcluster' def __virtual__(): ''' Only load if the vsphere execution module is available. ''' if HAS_JSONSCHEMA: return __virtualname__ return False, 'The esxcluster proxy module did not load.' def init(opts): ''' This function gets called when the proxy starts up. For login the protocol and port are cached. ''' log.debug('Initting esxcluster proxy module in process %s', os.getpid()) log.debug('Validating esxcluster proxy input') schema = EsxclusterProxySchema.serialize() log.trace('schema = %s', schema) proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) log.trace('proxy_conf = %s', proxy_conf) try: jsonschema.validate(proxy_conf, schema) except jsonschema.exceptions.ValidationError as exc: raise salt.exceptions.InvalidConfigError(exc) # Save mandatory fields in cache for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'): DETAILS[key] = proxy_conf[key] # Additional validation if DETAILS['mechanism'] == 'userpass': if 'username' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'username\' key found in proxy config.') if 'passwords' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'passwords\' key found in proxy config.') for key in ('username', 'passwords'): DETAILS[key] = proxy_conf[key] else: if 'domain' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'domain\' key found in proxy config.') if 'principal' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', 
but no ' '\'principal\' key found in proxy config.') for key in ('domain', 'principal'): DETAILS[key] = proxy_conf[key] # Save optional DETAILS['protocol'] = proxy_conf.get('protocol') DETAILS['port'] = proxy_conf.get('port') # Test connection if DETAILS['mechanism'] == 'userpass': # Get the correct login details log.debug('Retrieving credentials and testing vCenter connection for ' 'mehchanism \'userpass\'') try: username, password = find_credentials() DETAILS['password'] = password except salt.exceptions.SaltSystemExit as err: log.critical('Error: %s', err) return False return True def ping(): ''' Returns True. CLI Example: .. code-block:: bash salt esx-cluster test.ping ''' return True def shutdown(): ''' Shutdown the connection to the proxy device. For this proxy, shutdown is a no-op. ''' log.debug('esxcluster proxy shutdown() called...') def get_details(): ''' Function that returns the cached details ''' return DETAILS
saltstack/salt
salt/roster/flat.py
targets
python
def targets(tgt, tgt_type='glob', **kwargs):
    '''
    Return the targets from the flat yaml file, checks opts for location but
    defaults to /etc/salt/roster
    '''
    # Render the roster file through the configured renderer pipeline,
    # masking anything password-like in debug output.
    roster_file = get_roster_file(__opts__)
    renderers = salt.loader.render(__opts__, {})
    raw = compile_template(roster_file,
                           renderers,
                           __opts__['renderer'],
                           __opts__['renderer_blacklist'],
                           __opts__['renderer_whitelist'],
                           mask_value='passw*',
                           **kwargs)
    # Normalize minion ids to text and resolve any sdb:// references
    # before handing the data to the shared roster matcher.
    conditioned_raw = {
        six.text_type(minion): salt.config.apply_sdb(raw[minion])
        for minion in raw
    }
    return __utils__['roster_matcher.targets'](conditioned_raw, tgt, tgt_type, 'ipv4')
Return the targets from the flat yaml file, checks opts for location but defaults to /etc/salt/roster
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/flat.py#L18-L36
[ "def compile_template(template,\n renderers,\n default,\n blacklist,\n whitelist,\n saltenv='base',\n sls='',\n input_data='',\n **kwargs):\n '''\n Take the path to a template and return the high data structure\n derived from the template.\n\n Helpers:\n\n :param mask_value:\n Mask value for debugging purposes (prevent sensitive information etc)\n example: \"mask_value=\"pass*\". All \"passwd\", \"password\", \"pass\" will\n be masked (as text).\n '''\n\n # if any error occurs, we return an empty dictionary\n ret = {}\n\n log.debug('compile template: %s', template)\n\n if 'env' in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop('env')\n\n if template != ':string:':\n # Template was specified incorrectly\n if not isinstance(template, six.string_types):\n log.error('Template was specified incorrectly: %s', template)\n return ret\n # Template does not exist\n if not os.path.isfile(template):\n log.error('Template does not exist: %s', template)\n return ret\n # Template is an empty file\n if salt.utils.files.is_empty(template):\n log.debug('Template is an empty file: %s', template)\n return ret\n\n with codecs.open(template, encoding=SLS_ENCODING) as ifile:\n # data input to the first render function in the pipe\n input_data = ifile.read()\n if not input_data.strip():\n # Template is nothing but whitespace\n log.error('Template is nothing but whitespace: %s', template)\n return ret\n\n # Get the list of render funcs in the render pipe line.\n render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)\n\n windows_newline = '\\r\\n' in input_data\n\n input_data = StringIO(input_data)\n for render, argline in render_pipe:\n if salt.utils.stringio.is_readable(input_data):\n input_data.seek(0) # pylint: disable=no-member\n render_kwargs = dict(renderers=renderers, tmplpath=template)\n render_kwargs.update(kwargs)\n if argline:\n render_kwargs['argline'] = argline\n start = time.time()\n ret = render(input_data, 
saltenv, sls, **render_kwargs)\n log.profile(\n 'Time (in seconds) to render \\'%s\\' using \\'%s\\' renderer: %s',\n template,\n render.__module__.split('.')[-1],\n time.time() - start\n )\n if ret is None:\n # The file is empty or is being written elsewhere\n time.sleep(0.01)\n ret = render(input_data, saltenv, sls, **render_kwargs)\n input_data = ret\n if log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member\n # If ret is not a StringIO (which means it was rendered using\n # yaml, mako, or another engine which renders to a data\n # structure) we don't want to log this.\n if salt.utils.stringio.is_readable(ret):\n log.debug('Rendered data from file: %s:\\n%s', template,\n salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),\n kwargs.get('mask_value'))) # pylint: disable=no-member\n ret.seek(0) # pylint: disable=no-member\n\n # Preserve newlines from original template\n if windows_newline:\n if salt.utils.stringio.is_readable(ret):\n is_stringio = True\n contents = ret.read()\n else:\n is_stringio = False\n contents = ret\n\n if isinstance(contents, six.string_types):\n if '\\r\\n' not in contents:\n contents = contents.replace('\\n', '\\r\\n')\n ret = StringIO(contents) if is_stringio else contents\n else:\n if is_stringio:\n ret.seek(0)\n return ret\n", "def get_roster_file(options):\n '''\n Find respective roster file.\n\n :param options:\n :return:\n '''\n template = None\n # The __disable_custom_roster is always True if Salt SSH Client comes\n # from Salt API. 
In that case no way to define own 'roster_file', instead\n # this file needs to be chosen from already validated rosters\n # (see /etc/salt/master config).\n if options.get('__disable_custom_roster') and options.get('roster_file'):\n roster = options.get('roster_file').strip('/')\n for roster_location in options.get('rosters'):\n r_file = os.path.join(roster_location, roster)\n if os.path.isfile(r_file):\n template = r_file\n break\n del options['roster_file']\n\n if not template:\n if options.get('roster_file'):\n template = options.get('roster_file')\n elif 'config_dir' in options.get('__master_opts__', {}):\n template = os.path.join(options['__master_opts__']['config_dir'],\n 'roster')\n elif 'config_dir' in options:\n template = os.path.join(options['config_dir'], 'roster')\n else:\n template = os.path.join(salt.syspaths.CONFIG_DIR, 'roster')\n\n if not os.path.isfile(template):\n raise IOError('Roster file \"{0}\" not found'.format(template))\n\n if not os.access(template, os.R_OK):\n raise IOError('Access denied to roster \"{0}\"'.format(template))\n\n return template\n", "def apply_sdb(opts, sdb_opts=None):\n '''\n Recurse for sdb:// links for opts\n '''\n # Late load of SDB to keep CLI light\n import salt.utils.sdb\n if sdb_opts is None:\n sdb_opts = opts\n if isinstance(sdb_opts, six.string_types) and sdb_opts.startswith('sdb://'):\n return salt.utils.sdb.sdb_get(sdb_opts, opts)\n elif isinstance(sdb_opts, dict):\n for key, value in six.iteritems(sdb_opts):\n if value is None:\n continue\n sdb_opts[key] = apply_sdb(opts, value)\n elif isinstance(sdb_opts, list):\n for key, value in enumerate(sdb_opts):\n if value is None:\n continue\n sdb_opts[key] = apply_sdb(opts, value)\n\n return sdb_opts\n" ]
# -*- coding: utf-8 -*- ''' Read in the roster from a flat file using the renderer system ''' from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs import salt.loader import salt.config from salt.ext import six from salt.template import compile_template from salt.roster import get_roster_file import logging log = logging.getLogger(__name__)
saltstack/salt
salt/cli/key.py
SaltKey.run
python
def run(self):
    '''
    Execute salt-key
    '''
    # Deferred import keeps CLI startup light until the command actually runs.
    import salt.key
    self.parse_args()
    self.setup_logfile_logger()
    # Warn about insecure logging configurations before doing any key work.
    verify_log(self.config)
    key = salt.key.KeyCLI(self.config)
    # Only proceed after (optionally) switching to the configured user.
    if check_user(self.config['user']):
        key.run()
Execute salt-key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/key.py#L14-L26
[ "def check_user(user):\n '''\n Check user and assign process uid/gid.\n '''\n if salt.utils.platform.is_windows():\n return True\n if user == salt.utils.user.get_user():\n return True\n import pwd # after confirming not running Windows\n try:\n pwuser = pwd.getpwnam(user)\n try:\n if hasattr(os, 'initgroups'):\n os.initgroups(user, pwuser.pw_gid) # pylint: disable=minimum-python-version\n else:\n os.setgroups(salt.utils.user.get_gid_list(user, include_default=False))\n os.setgid(pwuser.pw_gid)\n os.setuid(pwuser.pw_uid)\n\n # We could just reset the whole environment but let's just override\n # the variables we can get from pwuser\n if 'HOME' in os.environ:\n os.environ['HOME'] = pwuser.pw_dir\n\n if 'SHELL' in os.environ:\n os.environ['SHELL'] = pwuser.pw_shell\n\n for envvar in ('USER', 'LOGNAME'):\n if envvar in os.environ:\n os.environ[envvar] = pwuser.pw_name\n\n except OSError:\n msg = 'Salt configured to run as user \"{0}\" but unable to switch.'\n msg = msg.format(user)\n if is_console_configured():\n log.critical(msg)\n else:\n sys.stderr.write(\"CRITICAL: {0}\\n\".format(msg))\n return False\n except KeyError:\n msg = 'User not found: \"{0}\"'.format(user)\n if is_console_configured():\n log.critical(msg)\n else:\n sys.stderr.write(\"CRITICAL: {0}\\n\".format(msg))\n return False\n return True\n", "def verify_log(opts):\n '''\n If an insecre logging configuration is found, show a warning\n '''\n level = LOG_LEVELS.get(str(opts.get('log_level')).lower(), logging.NOTSET)\n\n if level < logging.INFO:\n log.warning('Insecure logging configuration detected! 
Sensitive data may be logged.')\n", "def run(self):\n '''\n Run the logic for saltkey\n '''\n self._update_opts()\n cmd = self.opts['fun']\n\n veri = None\n ret = None\n try:\n if cmd in ('accept', 'reject', 'delete'):\n ret = self._run_cmd('name_match')\n if not isinstance(ret, dict):\n salt.output.display_output(ret, 'key', opts=self.opts)\n return ret\n ret = self._filter_ret(cmd, ret)\n if not ret:\n self._print_no_match(cmd, self.opts['match'])\n return\n print('The following keys are going to be {0}ed:'.format(cmd.rstrip('e')))\n salt.output.display_output(ret, 'key', opts=self.opts)\n\n if not self.opts.get('yes', False):\n try:\n if cmd.startswith('delete'):\n veri = input('Proceed? [N/y] ')\n if not veri:\n veri = 'n'\n else:\n veri = input('Proceed? [n/Y] ')\n if not veri:\n veri = 'y'\n except KeyboardInterrupt:\n raise SystemExit(\"\\nExiting on CTRL-c\")\n # accept/reject/delete the same keys we're printed to the user\n self.opts['match_dict'] = ret\n self.opts.pop('match', None)\n list_ret = ret\n\n if veri is None or veri.lower().startswith('y'):\n ret = self._run_cmd(cmd)\n if cmd in ('accept', 'reject', 'delete'):\n if cmd == 'delete':\n ret = list_ret\n for minions in ret.values():\n for minion in minions:\n print('Key for minion {0} {1}ed.'.format(minion,\n cmd.rstrip('e')))\n elif isinstance(ret, dict):\n salt.output.display_output(ret, 'key', opts=self.opts)\n else:\n salt.output.display_output({'return': ret}, 'key', opts=self.opts)\n except salt.exceptions.SaltException as exc:\n ret = '{0}'.format(exc)\n if not self.opts.get('quiet', False):\n salt.output.display_output(ret, 'nested', self.opts)\n return ret\n" ]
class SaltKey(salt.utils.parsers.SaltKeyOptionParser): ''' Initialize the Salt key manager '''
saltstack/salt
salt/sdb/tism.py
get
python
def get(key, service=None, profile=None):  # pylint: disable=W0613
    '''
    Get a decrypted secret from the tISMd API

    key
        The PGP-encrypted secret to decrypt.

    service
        Unused; present for sdb driver interface compatibility.

    profile
        The sdb profile dict; must contain ``url`` and ``token``.

    Returns the decrypted secret on success, or a string of the form
    ``'ERROR<status>'`` when the decryption request fails.

    Raises ``SaltConfigurationError`` when the profile is missing or
    incomplete.
    '''
    # Robustness fix: guard against profile being None as well as missing
    # keys. The previous code raised AttributeError (profile.get on None)
    # instead of the intended configuration error.
    if not profile or not profile.get('url') or not profile.get('token'):
        raise SaltConfigurationError("url and/or token missing from the tism sdb profile")

    request = {"token": profile['token'], "encsecret": key}

    result = http.query(
        profile['url'],
        method='POST',
        data=salt.utils.json.dumps(request),
    )

    decrypted = result.get('body')

    if not decrypted:
        log.warning(
            'tism.get sdb decryption request failed with error %s',
            result.get('error', 'unknown')
        )
        return 'ERROR' + six.text_type(result.get('status', 'unknown'))

    return decrypted
Get a decrypted secret from the tISMd API
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/tism.py#L53-L78
null
# -*- coding: utf-8 -*- ''' tISM - the Immutable Secrets Manager SDB Module :maintainer: tISM :maturity: New :platform: all .. versionadded:: 2017.7.0 This module will decrypt PGP encrypted secrets against a tISM server. .. code:: sdb://<profile>/<encrypted secret> sdb://tism/hQEMAzJ+GfdAB3KqAQf9E3cyvrPEWR1sf1tMvH0nrJ0bZa9kDFLPxvtwAOqlRiNp0F7IpiiVRF+h+sW5Mb4ffB1TElMzQ+/G5ptd6CjmgBfBsuGeajWmvLEi4lC6/9v1rYGjjLeOCCcN4Dl5AHlxUUaSrxB8akTDvSAnPvGhtRTZqDlltl5UEHsyYXM8RaeCrBw5Or1yvC9Ctx2saVp3xmALQvyhzkUv5pTb1mH0I9Z7E0ian07ZUOD+pVacDAf1oQcPpqkeNVTQQ15EP0fDuvnW+a0vxeLhkbFLfnwqhqEsvFxVFLHVLcs2ffE5cceeOMtVo7DS9fCtkdZr5hR7a+86n4hdKfwDMFXiBwSIPMkmY980N/H30L/r50+CBkuI/u4M2pXDcMYsvvt4ajCbJn91qaQ7BDI= A profile must be setup in the minion configuration or pillar. If you want to use sdb in a runner or pillar you must also place a profile in the master configuration. .. code-block:: yaml tism: driver: tism url: https://my.tismd:8080/decrypt token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhZG1pbiI6MSwiZXhwIjoxNTg1MTExNDYwLCJqdGkiOiI3NnA5cWNiMWdtdmw4Iiwia2V5cyI6WyJBTEwiXX0.RtAhG6Uorf5xnSf4Ya_GwJnoHkCsql4r1_hiOeDSLzo ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.json import salt.utils.http as http from salt.ext import six from salt.exceptions import SaltConfigurationError log = logging.getLogger(__name__) __virtualname__ = "tism" def __virtual__(): ''' This module has no other system dependencies ''' return __virtualname__
saltstack/salt
salt/pillar/postgres.py
ext_pillar
python
def ext_pillar(minion_id, pillar, *args, **kwargs):
    '''
    Execute queries against POSTGRES, merge and return as a dict
    '''
    # Thin module-level entry point: all of the work happens in the
    # SqlBaseExtPillar-derived class.
    pillar_source = POSTGRESExtPillar()
    return pillar_source.fetch(minion_id, pillar, *args, **kwargs)
Execute queries against POSTGRES, merge and return as a dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/postgres.py#L112-L119
null
# -*- coding: utf-8 -*- ''' Retrieve Pillar data by doing a postgres query .. versionadded:: 2017.7.0 :maturity: new :depends: psycopg2 :platform: all Complete Example ================ .. code-block:: yaml postgres: user: 'salt' pass: 'super_secret_password' db: 'salt_db' ext_pillar: - postgres: fromdb: query: 'SELECT col1,col2,col3,col4,col5,col6,col7 FROM some_random_table WHERE minion_pattern LIKE %s' depth: 5 as_list: True with_lists: [1,3] ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs from contextlib import contextmanager import logging # Import Salt libs from salt.pillar.sql_base import SqlBaseExtPillar # Set up logging log = logging.getLogger(__name__) # Import third party libs try: import psycopg2 HAS_POSTGRES = True except ImportError: HAS_POSTGRES = False def __virtual__(): if not HAS_POSTGRES: return False return True class POSTGRESExtPillar(SqlBaseExtPillar): ''' This class receives and processes the database rows from POSTGRES. ''' @classmethod def _db_name(cls): return 'POSTGRES' def _get_options(self): ''' Returns options used for the POSTGRES connection. 
''' defaults = {'host': 'localhost', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 5432} _options = {} _opts = __opts__.get('postgres', {}) for attr in defaults: if attr not in _opts: log.debug('Using default for POSTGRES %s', attr) _options[attr] = defaults[attr] continue _options[attr] = _opts[attr] return _options @contextmanager def _get_cursor(self): ''' Yield a POSTGRES cursor ''' _options = self._get_options() conn = psycopg2.connect(host=_options['host'], user=_options['user'], password=_options['pass'], dbname=_options['db'], port=_options['port']) cursor = conn.cursor() try: yield cursor log.debug('Connected to POSTGRES DB') except psycopg2.DatabaseError as err: log.exception('Error in ext_pillar POSTGRES: %s', err.args) finally: conn.close() def extract_queries(self, args, kwargs): ''' This function normalizes the config block into a set of queries we can use. The return is a list of consistently laid out dicts. ''' return super(POSTGRESExtPillar, self).extract_queries(args, kwargs)
saltstack/salt
salt/pillar/postgres.py
POSTGRESExtPillar._get_cursor
python
def _get_cursor(self):
    '''
    Yield a POSTGRES cursor
    '''
    opts = self._get_options()
    # Collect connection parameters first; note the profile key 'pass'
    # maps to psycopg2's 'password' and 'db' maps to 'dbname'.
    conn_kwargs = {
        'host': opts['host'],
        'user': opts['user'],
        'password': opts['pass'],
        'dbname': opts['db'],
        'port': opts['port'],
    }
    conn = psycopg2.connect(**conn_kwargs)
    cursor = conn.cursor()
    try:
        yield cursor
        log.debug('Connected to POSTGRES DB')
    except psycopg2.DatabaseError as err:
        log.exception('Error in ext_pillar POSTGRES: %s', err.args)
    finally:
        # Always release the connection, whether the caller's block
        # succeeded or raised.
        conn.close()
Yield a POSTGRES cursor
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/postgres.py#L85-L102
null
class POSTGRESExtPillar(SqlBaseExtPillar): ''' This class receives and processes the database rows from POSTGRES. ''' @classmethod def _db_name(cls): return 'POSTGRES' def _get_options(self): ''' Returns options used for the POSTGRES connection. ''' defaults = {'host': 'localhost', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 5432} _options = {} _opts = __opts__.get('postgres', {}) for attr in defaults: if attr not in _opts: log.debug('Using default for POSTGRES %s', attr) _options[attr] = defaults[attr] continue _options[attr] = _opts[attr] return _options @contextmanager def extract_queries(self, args, kwargs): ''' This function normalizes the config block into a set of queries we can use. The return is a list of consistently laid out dicts. ''' return super(POSTGRESExtPillar, self).extract_queries(args, kwargs)
saltstack/salt
salt/pillar/postgres.py
POSTGRESExtPillar.extract_queries
python
def extract_queries(self, args, kwargs):
    '''
    This function normalizes the config block into a set of queries we
    can use. The return is a list of consistently laid out dicts.
    '''
    # No POSTGRES-specific handling needed; delegate entirely to the
    # shared SQL base implementation.
    parent = super(POSTGRESExtPillar, self)
    return parent.extract_queries(args, kwargs)
This function normalizes the config block into a set of queries we can use. The return is a list of consistently laid out dicts.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/postgres.py#L104-L109
[ "def extract_queries(self, args, kwargs):\n '''\n This function normalizes the config block into a set of queries we\n can use. The return is a list of consistently laid out dicts.\n '''\n # Please note the function signature is NOT an error. Neither args, nor\n # kwargs should have asterisks. We are passing in a list and dict,\n # rather than receiving variable args. Adding asterisks WILL BREAK the\n # function completely.\n\n # First, this is the query buffer. Contains lists of [base,sql]\n qbuffer = []\n\n # Add on the non-keywords...\n qbuffer.extend([[None, s] for s in args])\n\n # And then the keywords...\n # They aren't in definition order, but they can't conflict each other.\n klist = list(kwargs.keys())\n klist.sort()\n qbuffer.extend([[k, kwargs[k]] for k in klist])\n\n # Filter out values that don't have queries.\n qbuffer = [x for x in qbuffer if (\n (isinstance(x[1], six.string_types) and len(x[1]))\n or\n (isinstance(x[1], (list, tuple)) and (len(x[1]) > 0) and x[1][0])\n or\n (isinstance(x[1], dict) and 'query' in x[1] and len(x[1]['query']))\n )]\n\n # Next, turn the whole buffer into full dicts.\n for qb in qbuffer:\n defaults = {'query': '',\n 'depth': 0,\n 'as_list': False,\n 'with_lists': None,\n 'ignore_null': False\n }\n if isinstance(qb[1], six.string_types):\n defaults['query'] = qb[1]\n elif isinstance(qb[1], (list, tuple)):\n defaults['query'] = qb[1][0]\n if len(qb[1]) > 1:\n defaults['depth'] = qb[1][1]\n # May set 'as_list' from qb[1][2].\n else:\n defaults.update(qb[1])\n if defaults['with_lists'] and isinstance(defaults['with_lists'], six.string_types):\n defaults['with_lists'] = [\n int(i) for i in defaults['with_lists'].split(',')\n ]\n qb[1] = defaults\n\n return qbuffer\n" ]
class POSTGRESExtPillar(SqlBaseExtPillar): ''' This class receives and processes the database rows from POSTGRES. ''' @classmethod def _db_name(cls): return 'POSTGRES' def _get_options(self): ''' Returns options used for the POSTGRES connection. ''' defaults = {'host': 'localhost', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 5432} _options = {} _opts = __opts__.get('postgres', {}) for attr in defaults: if attr not in _opts: log.debug('Using default for POSTGRES %s', attr) _options[attr] = defaults[attr] continue _options[attr] = _opts[attr] return _options @contextmanager def _get_cursor(self): ''' Yield a POSTGRES cursor ''' _options = self._get_options() conn = psycopg2.connect(host=_options['host'], user=_options['user'], password=_options['pass'], dbname=_options['db'], port=_options['port']) cursor = conn.cursor() try: yield cursor log.debug('Connected to POSTGRES DB') except psycopg2.DatabaseError as err: log.exception('Error in ext_pillar POSTGRES: %s', err.args) finally: conn.close()
saltstack/salt
salt/modules/ps.py
_get_proc_cmdline
python
def _get_proc_cmdline(proc):
    '''
    Returns the cmdline of a Process instance.

    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # psutil >= 2.0 exposes cmdline as a method; older releases as a
        # plain attribute.
        raw = proc.cmdline() if PSUTIL2 else proc.cmdline
        return salt.utils.data.decode(raw)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Process vanished or is inaccessible: report an empty cmdline.
        return []
Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L50-L59
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n", "def cmdline(self):\n return self._cmdline\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
_get_proc_create_time
python
def _get_proc_create_time(proc):
    '''
    Returns the create_time of a Process instance.

    It's backward compatible with < 2.0 versions of psutil.
    '''
    try:
        # psutil >= 2.0 exposes create_time as a method; older releases
        # as a plain attribute.
        raw = proc.create_time() if PSUTIL2 else proc.create_time
        return salt.utils.data.decode(raw)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # Process vanished or is inaccessible: no create time available.
        return None
Returns the create_time of a Process instance. It's backward compatible with < 2.0 versions of psutil.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L62-L71
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. 
''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
_get_proc_name
python
def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return []
Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L74-L83
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
_get_proc_status
python
def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None
Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L86-L95
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
_get_proc_username
python
def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None
Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L98-L107
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
top
python
def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result
Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. code-block:: bash salt '*' ps.top salt '*' ps.top 5 10
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L119-L178
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _get_proc_cmdline(proc):\n '''\n Returns the cmdline of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_create_time(proc):\n '''\n Returns the create_time of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return None\n", "def _get_proc_name(proc):\n '''\n Returns the name of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_status(proc):\n '''\n Returns the status of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return None\n", "def _get_proc_username(proc):\n '''\n Returns the username of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username)\n except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):\n return None\n", "def _get_proc_pid(proc):\n '''\n Returns the pid of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n return proc.pid\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. 
code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. 
code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. 
code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. 
code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
proc_info
python
def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc)
Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L194-L217
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. 
code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. 
code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. 
code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. 
code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. 
code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
kill_pid
python
def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False
Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L220-L247
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. 
full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. 
versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. 
code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. 
code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
pkill
python
def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed}
Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pkill bash signal=9 user=tom
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L250-L302
[ "def _get_proc_cmdline(proc):\n '''\n Returns the cmdline of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_name(proc):\n '''\n Returns the name of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_username(proc):\n '''\n Returns the username of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username)\n except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):\n return None\n", "def _get_proc_pid(proc):\n '''\n Returns the pid of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n return proc.pid\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. 
note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. 
code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. 
code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. 
code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
pgrep
python
def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None
Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L305-L368
[ "def _get_proc_cmdline(proc):\n '''\n Returns the cmdline of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_name(proc):\n '''\n Returns the name of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name)\n except (psutil.NoSuchProcess, psutil.AccessDenied):\n return []\n", "def _get_proc_username(proc):\n '''\n Returns the username of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n try:\n return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username)\n except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError):\n return None\n", "def _get_proc_pid(proc):\n '''\n Returns the pid of a Process instance.\n\n It's backward compatible with < 2.0 versions of psutil.\n '''\n return proc.pid\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. 
note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. 
code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. 
code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. 
code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
cpu_percent
python
def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result
Return the percent of time the CPU is busy. interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L371-L391
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. 
user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. 
code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
cpu_times
python
def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result
Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L394-L413
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. 
code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
virtual_memory
python
def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict())
.. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L416-L435
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. 
code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
swap_memory
python
def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict())
.. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L438-L457
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. 
code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
disk_partitions
python
def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result
Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. code-block:: bash salt '*' ps.disk_partitions
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L460-L477
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. 
code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
disk_partition_usage
python
def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result
Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L494-L508
[ "def disk_partitions(all=False):\n '''\n Return a list of disk partitions and their device, mount point, and\n filesystem type.\n\n all\n if set to False, only return local, physical partitions (hard disk,\n USB, CD/DVD partitions). If True, return all filesystems.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ps.disk_partitions\n '''\n result = [dict(partition._asdict()) for partition in\n psutil.disk_partitions(all)]\n return result\n", "def disk_usage(path):\n '''\n Given a path, return a dict listing the total available space as well as\n the free space, and used space.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ps.disk_usage /home\n '''\n return dict(psutil.disk_usage(path)._asdict())\n" ]
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
total_physical_memory
python
def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM
Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L511-L529
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. 
b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
boot_time
python
def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time
Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. code-block:: bash salt '*' ps.boot_time
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L550-L582
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. 
code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. 
versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
network_io_counters
python
def network_io_counters(interface=None):
    '''
    Return network I/O statistics.

    CLI Example:

    .. code-block:: bash

        salt '*' ps.network_io_counters

        salt '*' ps.network_io_counters interface=eth0
    '''
    if interface:
        # Per-NIC stats keyed by interface name; False when the NIC is unknown.
        per_nic = psutil.net_io_counters(pernic=True)
        if interface not in per_nic:
            return False
        return dict(per_nic[interface]._asdict())
    # No interface given: aggregate counters across all NICs.
    return dict(psutil.net_io_counters()._asdict())
Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L585-L604
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
disk_io_counters
python
def disk_io_counters(device=None):
    '''
    Return disk I/O statistics.

    CLI Example:

    .. code-block:: bash

        salt '*' ps.disk_io_counters

        salt '*' ps.disk_io_counters device=sda1
    '''
    if device:
        # Per-disk stats keyed by device name; False when the device is unknown.
        per_disk = psutil.disk_io_counters(perdisk=True)
        if device not in per_disk:
            return False
        return dict(per_disk[device]._asdict())
    # No device given: aggregate counters across all disks.
    return dict(psutil.disk_io_counters()._asdict())
Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L607-L626
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
get_users
python
def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False
Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L629-L660
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
lsof
python
def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret
Retrieve the lsof information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.lsof apache2
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L663-L677
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. 
code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. 
code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
netstat
python
def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret
Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L681-L699
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
ss
python
def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret
Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L703-L724
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
saltstack/salt
salt/modules/ps.py
psaux
python
def psaux(name): ''' Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2 ''' sanitize_name = six.text_type(name) pattern = re.compile(sanitize_name) salt_exception_pattern = re.compile("salt.+ps.psaux.+") ps_aux = __salt__['cmd.run']("ps aux") found_infos = [] ret = [] nb_lines = 0 for info in ps_aux.splitlines(): found = pattern.search(info) if found is not None: # remove 'salt' command from results if not salt_exception_pattern.search(info): nb_lines += 1 found_infos.append(info) pid_count = six.text_type(nb_lines) + " occurence(s)." ret = [] ret.extend([sanitize_name, found_infos, pid_count]) return ret
Retrieve information corresponding to a "ps aux" filtered with the given pattern. It could be just a name or a regular expression (using python search from "re" module). CLI Example: .. code-block:: bash salt '*' ps.psaux www-data.+apache2
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L727-L756
null
# -*- coding: utf-8 -*- ''' A salt interface to psutil, a system and process library. See http://code.google.com/p/psutil. :depends: - psutil Python module, version 0.3.0 or later - python-utmp package (optional) ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import time import datetime import re # Import salt libs import salt.utils.data from salt.exceptions import SaltInvocationError, CommandExecutionError # Import third party libs import salt.utils.decorators.path from salt.ext import six # pylint: disable=import-error try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True PSUTIL2 = getattr(psutil, 'version_info', ()) >= (2, 0) except ImportError: HAS_PSUTIL = False # pylint: enable=import-error def __virtual__(): if not HAS_PSUTIL: return False, 'The ps module cannot be loaded: python module psutil not installed.' # Functions and attributes used in this execution module seem to have been # added as of psutil 0.3.0, from an inspection of the source code. Only # make this module available if the version of psutil is >= 0.3.0. Note # that this may need to be tweaked if we find post-0.3.0 versions which # also have problems running the functions in this execution module, but # most distributions have already moved to later versions (for example, # as of Dec. 2013 EPEL is on 0.6.1, Debian 7 is on 0.5.1, etc.). if psutil.version_info >= (0, 3, 0): return True return (False, 'The ps execution module cannot be loaded: the psutil python module version {0} is less than 0.3.0'.format(psutil.version_info)) def _get_proc_cmdline(proc): ''' Returns the cmdline of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.cmdline() if PSUTIL2 else proc.cmdline) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_create_time(proc): ''' Returns the create_time of a Process instance. 
It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.create_time() if PSUTIL2 else proc.create_time) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_name(proc): ''' Returns the name of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.name() if PSUTIL2 else proc.name) except (psutil.NoSuchProcess, psutil.AccessDenied): return [] def _get_proc_status(proc): ''' Returns the status of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status) except (psutil.NoSuchProcess, psutil.AccessDenied): return None def _get_proc_username(proc): ''' Returns the username of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' try: return salt.utils.data.decode(proc.username() if PSUTIL2 else proc.username) except (psutil.NoSuchProcess, psutil.AccessDenied, KeyError): return None def _get_proc_pid(proc): ''' Returns the pid of a Process instance. It's backward compatible with < 2.0 versions of psutil. ''' return proc.pid def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. 
code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result def get_pid_list(): ''' Return a list of process ids (PIDs) for all running processes. CLI Example: .. code-block:: bash salt '*' ps.get_pid_list ''' return psutil.pids() def proc_info(pid, attrs=None): ''' Return a dictionary of information for a process id (PID). CLI Example: .. code-block:: bash salt '*' ps.proc_info 2322 salt '*' ps.proc_info 2322 attrs='["pid", "name"]' pid PID of process to query. attrs Optional list of desired process attributes. 
The list of possible attributes can be found here: http://pythonhosted.org/psutil/#psutil.Process ''' try: proc = psutil.Process(pid) return proc.as_dict(attrs) except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError) as exc: raise CommandExecutionError(exc) def kill_pid(pid, signal=15): ''' Kill a process by PID. .. code-block:: bash salt 'minion' ps.kill_pid pid [signal=signal_number] pid PID of process to kill. signal Signal to send to the process. See manpage entry for kill for possible values. Default: 15 (SIGTERM). **Example:** Send SIGKILL to process with PID 2000: .. code-block:: bash salt 'minion' ps.kill_pid 2000 signal=9 ''' try: psutil.Process(pid).send_signal(signal) return True except psutil.NoSuchProcess: return False def pkill(pattern, user=None, signal=15, full=False): ''' Kill processes matching a pattern. .. code-block:: bash salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\ [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. signal Signal to send to the process(es). See manpage entry for kill for possible values. Default: 15 (SIGTERM). full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. **Examples:** Send SIGHUP to all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pkill httpd signal=1 Send SIGKILL to all bash processes owned by user 'tom': .. 
code-block:: bash salt '*' ps.pkill bash signal=9 user=tom ''' killed = [] for proc in psutil.process_iter(): name_match = pattern in ' '.join(_get_proc_cmdline(proc)) if full \ else pattern in _get_proc_name(proc) user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: try: proc.send_signal(signal) killed.append(_get_proc_pid(proc)) except psutil.NoSuchProcess: pass if not killed: return None else: return {'killed': killed} def pgrep(pattern, user=None, full=False, pattern_is_regex=False): ''' Return the pids for processes matching a pattern. If full is true, the full command line is searched for a match, otherwise only the name of the command is searched. .. code-block:: bash salt '*' ps.pgrep pattern [user=username] [full=(true|false)] pattern Pattern to search for in the process list. user Limit matches to the given username. Default: All users. full A boolean value indicating whether only the name of the command or the full command line should be matched against the pattern. pattern_is_regex This flag enables ps.pgrep to mirror the regex search functionality found in the pgrep command line utility. .. versionadded:: Neon **Examples:** Find all httpd processes on all 'www' minions: .. code-block:: bash salt 'www.*' ps.pgrep httpd Find all bash processes owned by user 'tom': .. code-block:: bash salt '*' ps.pgrep bash user=tom ''' procs = [] if pattern_is_regex: pattern = re.compile(str(pattern)) procs = [] for proc in psutil.process_iter(): if full: process_line = ' '.join(_get_proc_cmdline(proc)) else: process_line = _get_proc_name(proc) if pattern_is_regex: name_match = re.search(pattern, process_line) else: name_match = pattern in process_line user_match = True if user is None else user == _get_proc_username(proc) if name_match and user_match: procs.append(_get_proc_pid(proc)) return procs or None def cpu_percent(interval=0.1, per_cpu=False): ''' Return the percent of time the CPU is busy. 
interval the number of seconds to sample CPU usage over per_cpu if True return an array of CPU percent busy for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_percent ''' if per_cpu: result = list(psutil.cpu_percent(interval, True)) else: result = psutil.cpu_percent(interval) return result def cpu_times(per_cpu=False): ''' Return the percent of time the CPU spends in each state, e.g. user, system, idle, nice, iowait, irq, softirq. per_cpu if True return an array of percents for each CPU, otherwise aggregate all percents into one number CLI Example: .. code-block:: bash salt '*' ps.cpu_times ''' if per_cpu: result = [dict(times._asdict()) for times in psutil.cpu_times(True)] else: result = dict(psutil.cpu_times(per_cpu)._asdict()) return result def virtual_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes statistics about system memory usage. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.virtual_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.virtual_memory()._asdict()) def swap_memory(): ''' .. versionadded:: 2014.7.0 Return a dict that describes swap memory statistics. .. note:: This function is only available in psutil version 0.6.0 and above. CLI Example: .. code-block:: bash salt '*' ps.swap_memory ''' if psutil.version_info < (0, 6, 0): msg = 'swap_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) return dict(psutil.swap_memory()._asdict()) def disk_partitions(all=False): ''' Return a list of disk partitions and their device, mount point, and filesystem type. all if set to False, only return local, physical partitions (hard disk, USB, CD/DVD partitions). If True, return all filesystems. CLI Example: .. 
code-block:: bash salt '*' ps.disk_partitions ''' result = [dict(partition._asdict()) for partition in psutil.disk_partitions(all)] return result def disk_usage(path): ''' Given a path, return a dict listing the total available space as well as the free space, and used space. CLI Example: .. code-block:: bash salt '*' ps.disk_usage /home ''' return dict(psutil.disk_usage(path)._asdict()) def disk_partition_usage(all=False): ''' Return a list of disk partitions plus the mount point, filesystem and usage statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_partition_usage ''' result = disk_partitions(all) for partition in result: partition.update(disk_usage(partition['mountpoint'])) return result def total_physical_memory(): ''' Return the total number of bytes of physical memory. CLI Example: .. code-block:: bash salt '*' ps.total_physical_memory ''' if psutil.version_info < (0, 6, 0): msg = 'virtual_memory is only available in psutil 0.6.0 or greater' raise CommandExecutionError(msg) try: return psutil.virtual_memory().total except AttributeError: # TOTAL_PHYMEM is deprecated but with older psutil versions this is # needed as a fallback. return psutil.TOTAL_PHYMEM def num_cpus(): ''' Return the number of CPUs. CLI Example: .. code-block:: bash salt '*' ps.num_cpus ''' try: return psutil.cpu_count() except AttributeError: # NUM_CPUS is deprecated but with older psutil versions this is needed # as a fallback. return psutil.NUM_CPUS def boot_time(time_format=None): ''' Return the boot time in number of seconds since the epoch began. CLI Example: time_format Optionally specify a `strftime`_ format string. Use ``time_format='%c'`` to get a nicely-formatted locale specific date and time (i.e. ``Fri May 2 19:08:32 2014``). .. _strftime: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior .. versionadded:: 2014.1.4 .. 
code-block:: bash salt '*' ps.boot_time ''' try: b_time = int(psutil.boot_time()) except AttributeError: # get_boot_time() has been removed in newer psutil versions, and has # been replaced by boot_time() which provides the same information. b_time = int(psutil.boot_time()) if time_format: # Load epoch timestamp as a datetime.datetime object b_time = datetime.datetime.fromtimestamp(b_time) try: return b_time.strftime(time_format) except TypeError as exc: raise SaltInvocationError('Invalid format string: {0}'.format(exc)) return b_time def network_io_counters(interface=None): ''' Return network I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.network_io_counters salt '*' ps.network_io_counters interface=eth0 ''' if not interface: return dict(psutil.net_io_counters()._asdict()) else: stats = psutil.net_io_counters(pernic=True) if interface in stats: return dict(stats[interface]._asdict()) else: return False def disk_io_counters(device=None): ''' Return disk I/O statistics. CLI Example: .. code-block:: bash salt '*' ps.disk_io_counters salt '*' ps.disk_io_counters device=sda1 ''' if not device: return dict(psutil.disk_io_counters()._asdict()) else: stats = psutil.disk_io_counters(perdisk=True) if device in stats: return dict(stats[device]._asdict()) else: return False def get_users(): ''' Return logged-in users. CLI Example: .. code-block:: bash salt '*' ps.get_users ''' try: recs = psutil.users() return [dict(x._asdict()) for x in recs] except AttributeError: # get_users is only present in psutil > v0.5.0 # try utmp try: import utmp # pylint: disable=import-error result = [] while True: rec = utmp.utmpaccess.getutent() if rec is None: return result elif rec[0] == 7: started = rec[8] if isinstance(started, tuple): started = started[0] result.append({'name': rec[4], 'terminal': rec[2], 'started': started, 'host': rec[5]}) except ImportError: return False def lsof(name): ''' Retrieve the lsof information of the given process name. CLI Example: .. 
code-block:: bash salt '*' ps.lsof apache2 ''' sanitize_name = six.text_type(name) lsof_infos = __salt__['cmd.run']("lsof -c " + sanitize_name) ret = [] ret.extend([sanitize_name, lsof_infos]) return ret @salt.utils.decorators.path.which('netstat') def netstat(name): ''' Retrieve the netstat information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.netstat apache2 ''' sanitize_name = six.text_type(name) netstat_infos = __salt__['cmd.run']("netstat -nap") found_infos = [] ret = [] for info in netstat_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret @salt.utils.decorators.path.which('ss') def ss(name): ''' Retrieve the ss information of the given process name. CLI Example: .. code-block:: bash salt '*' ps.ss apache2 .. versionadded:: 2016.11.6 ''' sanitize_name = six.text_type(name) ss_infos = __salt__['cmd.run']("ss -neap") found_infos = [] ret = [] for info in ss_infos.splitlines(): if info.find(sanitize_name) != -1: found_infos.append(info) ret.extend([sanitize_name, found_infos]) return ret
saltstack/salt
salt/states/pagerduty_service.py
present
python
def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure pagerduty service exists. This method accepts as arguments everything defined in https://developer.pagerduty.com/documentation/rest/services/create Note that many arguments are mutually exclusive, depending on the "type" argument. Examples: .. code-block:: yaml # create a PagerDuty email service at test-email@DOMAIN.pagerduty.com ensure generic email service exists: pagerduty_service.present: - name: my email service - service: description: "email service controlled by salt" escalation_policy_id: "my escalation policy" type: "generic_email" service_key: "test-email" .. code-block:: yaml # create a pagerduty service using cloudwatch integration ensure my cloudwatch service exists: pagerduty_service.present: - name: my cloudwatch service - service: escalation_policy_id: "my escalation policy" type: aws_cloudwatch description: "my cloudwatch service controlled by salt" ''' # TODO: aws_cloudwatch type should be integrated with boto_sns # for convenience, we accept id, name, or email for users # and we accept the id or name for schedules kwargs['service']['name'] = kwargs['name'] # make args mirror PD API structure escalation_policy_id = kwargs['service']['escalation_policy_id'] escalation_policy = __salt__['pagerduty_util.get_resource']('escalation_policies', escalation_policy_id, ['name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if escalation_policy: kwargs['service']['escalation_policy_id'] = escalation_policy['id'] r = __salt__['pagerduty_util.resource_present']('services', ['name', 'id'], _diff, profile, subdomain, api_key, **kwargs) return r
Ensure pagerduty service exists. This method accepts as arguments everything defined in https://developer.pagerduty.com/documentation/rest/services/create Note that many arguments are mutually exclusive, depending on the "type" argument. Examples: .. code-block:: yaml # create a PagerDuty email service at test-email@DOMAIN.pagerduty.com ensure generic email service exists: pagerduty_service.present: - name: my email service - service: description: "email service controlled by salt" escalation_policy_id: "my escalation policy" type: "generic_email" service_key: "test-email" .. code-block:: yaml # create a pagerduty service using cloudwatch integration ensure my cloudwatch service exists: pagerduty_service.present: - name: my cloudwatch service - service: escalation_policy_id: "my escalation policy" type: aws_cloudwatch description: "my cloudwatch service controlled by salt"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty_service.py#L30-L84
null
# -*- coding: utf-8 -*- ''' Manage PagerDuty services Escalation policies can be referenced by pagerduty ID or by namea. For example: .. code-block:: yaml ensure test service pagerduty_service.present: - name: 'my service' - escalation_policy_id: 'my escalation policy' - type: nagios ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def __virtual__(): ''' Only load if the pygerduty module is available in __salt__ ''' return 'pagerduty_service' if 'pagerduty_util.get_resource' in __salt__ else False def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure a pagerduty service does not exist. Name can be the service name or pagerduty service id. ''' r = __salt__['pagerduty_util.resource_absent']('services', ['name', 'id'], profile, subdomain, api_key, **kwargs) return r def _diff(state_data, resource_object): '''helper method to compare salt state info with the PagerDuty API json structure, and determine if we need to update. returns the dict to pass to the PD API to perform the update, or empty dict if no update. ''' objects_differ = None for k, v in state_data['service'].items(): if k == 'escalation_policy_id': resource_value = resource_object['escalation_policy']['id'] elif k == 'service_key': # service_key on create must 'foo' but the GET will return 'foo@bar.pagerduty.com' resource_value = resource_object['service_key'] if '@' in resource_value: resource_value = resource_value[0:resource_value.find('@')] else: resource_value = resource_object[k] if v != resource_value: objects_differ = '{0} {1} {2}'.format(k, v, resource_value) break if objects_differ: return state_data else: return {}
saltstack/salt
salt/states/pagerduty_service.py
absent
python
def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure a pagerduty service does not exist. Name can be the service name or pagerduty service id. ''' r = __salt__['pagerduty_util.resource_absent']('services', ['name', 'id'], profile, subdomain, api_key, **kwargs) return r
Ensure a pagerduty service does not exist. Name can be the service name or pagerduty service id.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty_service.py#L87-L98
null
# -*- coding: utf-8 -*- ''' Manage PagerDuty services Escalation policies can be referenced by pagerduty ID or by namea. For example: .. code-block:: yaml ensure test service pagerduty_service.present: - name: 'my service' - escalation_policy_id: 'my escalation policy' - type: nagios ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def __virtual__(): ''' Only load if the pygerduty module is available in __salt__ ''' return 'pagerduty_service' if 'pagerduty_util.get_resource' in __salt__ else False def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure pagerduty service exists. This method accepts as arguments everything defined in https://developer.pagerduty.com/documentation/rest/services/create Note that many arguments are mutually exclusive, depending on the "type" argument. Examples: .. code-block:: yaml # create a PagerDuty email service at test-email@DOMAIN.pagerduty.com ensure generic email service exists: pagerduty_service.present: - name: my email service - service: description: "email service controlled by salt" escalation_policy_id: "my escalation policy" type: "generic_email" service_key: "test-email" .. 
code-block:: yaml # create a pagerduty service using cloudwatch integration ensure my cloudwatch service exists: pagerduty_service.present: - name: my cloudwatch service - service: escalation_policy_id: "my escalation policy" type: aws_cloudwatch description: "my cloudwatch service controlled by salt" ''' # TODO: aws_cloudwatch type should be integrated with boto_sns # for convenience, we accept id, name, or email for users # and we accept the id or name for schedules kwargs['service']['name'] = kwargs['name'] # make args mirror PD API structure escalation_policy_id = kwargs['service']['escalation_policy_id'] escalation_policy = __salt__['pagerduty_util.get_resource']('escalation_policies', escalation_policy_id, ['name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if escalation_policy: kwargs['service']['escalation_policy_id'] = escalation_policy['id'] r = __salt__['pagerduty_util.resource_present']('services', ['name', 'id'], _diff, profile, subdomain, api_key, **kwargs) return r def _diff(state_data, resource_object): '''helper method to compare salt state info with the PagerDuty API json structure, and determine if we need to update. returns the dict to pass to the PD API to perform the update, or empty dict if no update. ''' objects_differ = None for k, v in state_data['service'].items(): if k == 'escalation_policy_id': resource_value = resource_object['escalation_policy']['id'] elif k == 'service_key': # service_key on create must 'foo' but the GET will return 'foo@bar.pagerduty.com' resource_value = resource_object['service_key'] if '@' in resource_value: resource_value = resource_value[0:resource_value.find('@')] else: resource_value = resource_object[k] if v != resource_value: objects_differ = '{0} {1} {2}'.format(k, v, resource_value) break if objects_differ: return state_data else: return {}
saltstack/salt
salt/states/pagerduty_service.py
_diff
python
def _diff(state_data, resource_object): '''helper method to compare salt state info with the PagerDuty API json structure, and determine if we need to update. returns the dict to pass to the PD API to perform the update, or empty dict if no update. ''' objects_differ = None for k, v in state_data['service'].items(): if k == 'escalation_policy_id': resource_value = resource_object['escalation_policy']['id'] elif k == 'service_key': # service_key on create must 'foo' but the GET will return 'foo@bar.pagerduty.com' resource_value = resource_object['service_key'] if '@' in resource_value: resource_value = resource_value[0:resource_value.find('@')] else: resource_value = resource_object[k] if v != resource_value: objects_differ = '{0} {1} {2}'.format(k, v, resource_value) break if objects_differ: return state_data else: return {}
helper method to compare salt state info with the PagerDuty API json structure, and determine if we need to update. returns the dict to pass to the PD API to perform the update, or empty dict if no update.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pagerduty_service.py#L101-L126
null
# -*- coding: utf-8 -*- ''' Manage PagerDuty services Escalation policies can be referenced by pagerduty ID or by namea. For example: .. code-block:: yaml ensure test service pagerduty_service.present: - name: 'my service' - escalation_policy_id: 'my escalation policy' - type: nagios ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def __virtual__(): ''' Only load if the pygerduty module is available in __salt__ ''' return 'pagerduty_service' if 'pagerduty_util.get_resource' in __salt__ else False def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure pagerduty service exists. This method accepts as arguments everything defined in https://developer.pagerduty.com/documentation/rest/services/create Note that many arguments are mutually exclusive, depending on the "type" argument. Examples: .. code-block:: yaml # create a PagerDuty email service at test-email@DOMAIN.pagerduty.com ensure generic email service exists: pagerduty_service.present: - name: my email service - service: description: "email service controlled by salt" escalation_policy_id: "my escalation policy" type: "generic_email" service_key: "test-email" .. 
code-block:: yaml # create a pagerduty service using cloudwatch integration ensure my cloudwatch service exists: pagerduty_service.present: - name: my cloudwatch service - service: escalation_policy_id: "my escalation policy" type: aws_cloudwatch description: "my cloudwatch service controlled by salt" ''' # TODO: aws_cloudwatch type should be integrated with boto_sns # for convenience, we accept id, name, or email for users # and we accept the id or name for schedules kwargs['service']['name'] = kwargs['name'] # make args mirror PD API structure escalation_policy_id = kwargs['service']['escalation_policy_id'] escalation_policy = __salt__['pagerduty_util.get_resource']('escalation_policies', escalation_policy_id, ['name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if escalation_policy: kwargs['service']['escalation_policy_id'] = escalation_policy['id'] r = __salt__['pagerduty_util.resource_present']('services', ['name', 'id'], _diff, profile, subdomain, api_key, **kwargs) return r def absent(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure a pagerduty service does not exist. Name can be the service name or pagerduty service id. ''' r = __salt__['pagerduty_util.resource_absent']('services', ['name', 'id'], profile, subdomain, api_key, **kwargs) return r
saltstack/salt
salt/modules/baredoc.py
modules_and_args
python
def modules_and_args(modules=True, states=False, names_only=False): ''' Walk the Salt install tree and return a dictionary or a list of the functions therein as well as their arguments. :param modules: Walk the modules directory if True :param states: Walk the states directory if True :param names_only: Return only a list of the callable functions instead of a dictionary with arguments :return: An OrderedDict with callable function names as keys and lists of arguments as values (if ``names_only``==False) or simply an ordered list of callable function nanes (if ``names_only``==True). CLI Example: (example truncated for brevity) .. code-block:: bash salt myminion baredoc.modules_and_args myminion: ---------- [...] at.atrm: at.jobcheck: at.mod_watch: - name at.present: - unique_tag - name - timespec - job - tag - user at.watch: - unique_tag - name - timespec - job - tag - user [...] ''' dirs = [] module_dir = os.path.dirname(os.path.realpath(__file__)) state_dir = os.path.join(os.path.dirname(module_dir), 'states') if modules: dirs.append(module_dir) if states: dirs.append(state_dir) ret = _mods_with_args(dirs) if names_only: return sorted(ret.keys()) else: return OrderedDict(sorted(ret.items()))
Walk the Salt install tree and return a dictionary or a list of the functions therein as well as their arguments. :param modules: Walk the modules directory if True :param states: Walk the states directory if True :param names_only: Return only a list of the callable functions instead of a dictionary with arguments :return: An OrderedDict with callable function names as keys and lists of arguments as values (if ``names_only``==False) or simply an ordered list of callable function nanes (if ``names_only``==True). CLI Example: (example truncated for brevity) .. code-block:: bash salt myminion baredoc.modules_and_args myminion: ---------- [...] at.atrm: at.jobcheck: at.mod_watch: - name at.present: - unique_tag - name - timespec - job - tag - user at.watch: - unique_tag - name - timespec - job - tag - user [...]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/baredoc.py#L96-L151
[ "def _mods_with_args(dirs):\n ret = {}\n for d in dirs:\n for m in os.listdir(d):\n if m.endswith('.py'):\n with salt.utils.files.fopen(os.path.join(d, m), 'r') as f:\n in_def = False\n fn_def = u''\n modulename = m.split('.')[0]\n virtualname = None\n\n for l in f:\n l = salt.utils.data.decode(l, encoding='utf-8').rstrip()\n l = re.sub(r'(.*)#(.*)', r'\\1', l)\n if '__virtualname__ =' in l and not virtualname:\n virtualname = l.split()[2].strip(\"'\").strip('\"')\n continue\n if l.startswith(u'def '):\n in_def = True\n fn_def = l\n if ':' in l:\n if in_def:\n if not l.startswith(u'def '):\n fn_def = fn_def + l\n _parse_function_definition(fn_def, virtualname or modulename, ret)\n fn_def = u''\n in_def = False\n continue\n if in_def and not l.startswith(u'def '):\n fn_def = fn_def + l\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Baredoc walks the installed module and state directories and generates dictionaries and lists of the function names and their arguments. .. versionadded:: Neon ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os import re # Import salt libs import salt.loader import salt.runner import salt.state import salt.utils.data import salt.utils.files import salt.utils.args import salt.utils.schema # Import 3rd-party libs from salt.ext import six from salt.utils.odict import OrderedDict log = logging.getLogger(__name__) def _parse_function_definition(fn_def, modulename, ret): args = [] match = re.match(r'def\s+(.*?)\((.*)\):$', fn_def) if match is None: return fn_name = match.group(1) if fn_name.startswith('_'): return if fn_name.endswith('_'): fn_name = fn_name[0:-1] fn_name = fn_name.strip('"') fn_name = fn_name.strip("'") try: raw_args = match.group(2) raw_args = re.sub(r'(.*)\(.*\)(.*)', r'\1\2', raw_args) raw_args = re.sub(r'(.*)\'.*\'(.*)', r'\1\2', raw_args) individual_args = raw_args.split(',') for a in individual_args: if '*' in a: continue args.append(a.split('=')[0].strip()) except AttributeError: pass key = '{}.{}'.format(modulename, fn_name) if key in ret: ret[key].extend(args) else: ret[key] = args ret[key] = list(set(ret[key])) def _mods_with_args(dirs): ret = {} for d in dirs: for m in os.listdir(d): if m.endswith('.py'): with salt.utils.files.fopen(os.path.join(d, m), 'r') as f: in_def = False fn_def = u'' modulename = m.split('.')[0] virtualname = None for l in f: l = salt.utils.data.decode(l, encoding='utf-8').rstrip() l = re.sub(r'(.*)#(.*)', r'\1', l) if '__virtualname__ =' in l and not virtualname: virtualname = l.split()[2].strip("'").strip('"') continue if l.startswith(u'def '): in_def = True fn_def = l if ':' in l: if in_def: if not l.startswith(u'def '): fn_def = fn_def + l _parse_function_definition(fn_def, virtualname or modulename, ret) fn_def = u'' 
in_def = False continue if in_def and not l.startswith(u'def '): fn_def = fn_def + l return ret def modules_with_test(): ''' Return a list of callable functions that have a ``test=`` flag. CLI Example: (results trimmed for brevity) .. code-block:: bash salt myminion baredoc.modules_with_test myminion: ---------- - boto_elb.set_instances - netconfig.managed - netconfig.replace_pattern - pkg.install - salt.state - state.high - state.highstate ''' mods = modules_and_args() testmods = [] for module_name, module_args in six.iteritems(mods): if 'test' in module_args: testmods.append(module_name) return sorted(testmods)
saltstack/salt
salt/output/pony.py
output
python
def output(data, **kwargs): # pylint: disable=unused-argument ''' Mane function ''' high_out = __salt__['highstate'](data) return subprocess.check_output(['ponysay', salt.utils.data.decode(high_out)])
Mane function
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/pony.py#L62-L67
[ "def decode(data, encoding=None, errors='strict', keep=False,\n normalize=False, preserve_dict_class=False, preserve_tuples=False,\n to_str=False):\n '''\n Generic function which will decode whichever type is passed, if necessary.\n Optionally use to_str=True to ensure strings are str types and not unicode\n on Python 2.\n\n If `strict` is True, and `keep` is False, and we fail to decode, a\n UnicodeDecodeError will be raised. Passing `keep` as True allows for the\n original value to silently be returned in cases where decoding fails. This\n can be useful for cases where the data passed to this function is likely to\n contain binary blobs, such as in the case of cp.recv.\n\n If `normalize` is True, then unicodedata.normalize() will be used to\n normalize unicode strings down to a single code point per glyph. It is\n recommended not to normalize unless you know what you're doing. For\n instance, if `data` contains a dictionary, it is possible that normalizing\n will lead to data loss because the following two strings will normalize to\n the same value:\n\n - u'\\\\u044f\\\\u0438\\\\u0306\\\\u0446\\\\u0430.txt'\n - u'\\\\u044f\\\\u0439\\\\u0446\\\\u0430.txt'\n\n One good use case for normalization is in the test suite. For example, on\n some platforms such as Mac OS, os.listdir() will produce the first of the\n two strings above, in which \"й\" is represented as two code points (i.e. one\n for the base character, and one for the breve mark). 
Normalizing allows for\n a more reliable test case.\n '''\n _decode_func = salt.utils.stringutils.to_unicode \\\n if not to_str \\\n else salt.utils.stringutils.to_str\n if isinstance(data, Mapping):\n return decode_dict(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, list):\n return decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n elif isinstance(data, tuple):\n return decode_tuple(data, encoding, errors, keep, normalize,\n preserve_dict_class, to_str) \\\n if preserve_tuples \\\n else decode_list(data, encoding, errors, keep, normalize,\n preserve_dict_class, preserve_tuples, to_str)\n else:\n try:\n data = _decode_func(data, encoding, errors, normalize)\n except TypeError:\n # to_unicode raises a TypeError when input is not a\n # string/bytestring/bytearray. This is expected and simply means we\n # are going to leave the value as-is.\n pass\n except UnicodeDecodeError:\n if not keep:\n raise\n return data\n" ]
# -*- coding: utf-8 -*- r''' Display Pony output data structure ================================== :depends: - ponysay CLI program Display output from a pony. Ponies are better than cows because everybody wants a pony. Example output: .. code-block:: cfg < {'local': True} > ----------------- \ \ \ ▄▄▄▄▄▄▄ ▀▄▄████▄▄ ▄▄▄█████▄█▄█▄█▄▄▄ ██████▄▄▄█▄▄█████▄▄ ▀▄▀ █████▄▄█▄▄█████ ▄▄▄███████████▄▄▄ ████▄▄▄▄▄▄███▄▄██ ▄▄▄▄▄▄▄ ████▄████▄██▄▄███ ▄▄▄▄██▄▄▄▄▄▄ █▄███▄▄█▄███▄▄██▄▀ ▄▄███████▄▄███▄▄ ▀▄██████████████▄▄ ▄▄█▄▀▀▀▄▄█████▄▄██ ▀▀▀▀▀█████▄█▄█▄▄▄▄▄▄▄█ ▀▄████▄████ ████▄███▄▄▄▄▄▄▄▄▄ ▄▄█████▄███ ▀▄█▄█▄▄▄██▄▄▄▄▄██ ▄▄██▄██████ ▀▄████████████▄▀ ▄▄█▄██████▄▀ ██▄██▄▄▄▄█▄███▄ ███▄▄▄▄▄██▄▀ ██████ ▀▄▄█████ ▀████████ ▄▄▄▄███ ███████ ██████▄█▄▄ ███████ ████████▀▄▀███▄▄█▄▄ ▄██▄▄████ ████████ ▀▄██▀▄▄▀ █▄▄██████ █▄▄██████ █▄▄▄▄█ █▄▄▄▄█ ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import subprocess # Import Salt libs import salt.utils.data import salt.utils.path __virtualname__ = 'pony' def __virtual__(): if salt.utils.path.which('ponysay'): return __virtualname__ return False # pylint: disable=E0598
saltstack/salt
salt/modules/win_dsc.py
run_config
python
def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. 
Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False
r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. 
Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L95-L180
[ "def compile_config(path,\n source=None,\n config_name=None,\n config_data=None,\n config_data_source=None,\n script_parameters=None,\n salt_env='base'):\n r'''\n Compile a config from a PowerShell script (``.ps1``)\n\n Args:\n\n path (str): Path (local) to the script that will create the ``.mof``\n configuration file. If no source is passed, the file must exist\n locally. Required.\n\n source (str): Path to the script on ``file_roots`` to cache at the\n location specified by ``path``. The source file will be cached\n locally and then executed. If source is not passed, the config\n script located at ``path`` will be compiled. Optional.\n\n config_name (str): The name of the Configuration within the script to\n apply. If the script contains multiple configurations within the\n file a ``config_name`` must be specified. If the ``config_name`` is\n not specified, the name of the file will be used as the\n ``config_name`` to run. Optional.\n\n config_data (str): Configuration data in the form of a hash table that\n will be passed to the ``ConfigurationData`` parameter when the\n ``config_name`` is compiled. This can be the path to a ``.psd1``\n file containing the proper hash table or the PowerShell code to\n create the hash table.\n\n .. versionadded:: 2017.7.0\n\n config_data_source (str): The path to the ``.psd1`` file on\n ``file_roots`` to cache at the location specified by\n ``config_data``. If this is specified, ``config_data`` must be a\n local path instead of a hash table.\n\n .. versionadded:: 2017.7.0\n\n script_parameters (str): Any additional parameters expected by the\n configuration script. These must be defined in the script itself.\n\n .. versionadded:: 2017.7.0\n\n salt_env (str): The salt environment to use when copying the source.\n Default is 'base'\n\n Returns:\n dict: A dictionary containing the results of the compilation\n\n CLI Example:\n\n To compile a config from a script that already exists on the system:\n\n .. 
code-block:: bash\n\n salt '*' dsc.compile_config C:\\\\DSC\\\\WebsiteConfig.ps1\n\n To cache a config script to the system from the master and compile it:\n\n .. code-block:: bash\n\n salt '*' dsc.compile_config C:\\\\DSC\\\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1\n '''\n if source:\n log.info('DSC: Caching %s', source)\n cached_files = __salt__['cp.get_file'](path=source,\n dest=path,\n saltenv=salt_env,\n makedirs=True)\n if not cached_files:\n error = 'Failed to cache {0}'.format(source)\n log.error('DSC: %s', error)\n raise CommandExecutionError(error)\n\n if config_data_source:\n log.info('DSC: Caching %s', config_data_source)\n cached_files = __salt__['cp.get_file'](path=config_data_source,\n dest=config_data,\n saltenv=salt_env,\n makedirs=True)\n if not cached_files:\n error = 'Failed to cache {0}'.format(config_data_source)\n log.error('DSC: %s', error)\n raise CommandExecutionError(error)\n\n # Make sure the path exists\n if not os.path.exists(path):\n error = '\"{0}\" not found'.format(path)\n log.error('DSC: %s', error)\n raise CommandExecutionError(error)\n\n if config_name is None:\n # If the name of the config isn't passed, make it the name of the .ps1\n config_name = os.path.splitext(os.path.basename(path))[0]\n\n cwd = os.path.dirname(path)\n\n # Run the script and see if the compile command is in the script\n cmd = [path]\n # Add any script parameters\n if script_parameters:\n cmd.append(script_parameters)\n # Select fields to return\n cmd.append('| Select-Object -Property FullName, Extension, Exists, '\n '@{Name=\"LastWriteTime\";Expression={Get-Date ($_.LastWriteTime) '\n '-Format g}}')\n\n cmd = ' '.join(cmd)\n\n ret = _pshell(cmd, cwd)\n\n if ret:\n # Script compiled, return results\n if ret.get('Exists'):\n log.info('DSC: Compile Config: %s', ret)\n return ret\n\n # If you get to this point, the script did not contain a compile command\n # dot source the script to compile the state and generate the mof file\n cmd = ['.', 
path]\n if script_parameters:\n cmd.append(script_parameters)\n cmd.extend([';', config_name])\n if config_data:\n cmd.append(config_data)\n cmd.append('| Select-Object -Property FullName, Extension, Exists, '\n '@{Name=\"LastWriteTime\";Expression={Get-Date ($_.LastWriteTime) '\n '-Format g}}')\n\n cmd = ' '.join(cmd)\n\n ret = _pshell(cmd, cwd)\n\n if ret:\n # Script compiled, return results\n if ret.get('Exists'):\n log.info('DSC: Compile Config: %s', ret)\n return ret\n\n error = 'Failed to compile config: {0}'.format(path)\n error += '\\nReturned: {0}'.format(ret)\n log.error('DSC: %s', error)\n raise CommandExecutionError(error)\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. 
This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run 
the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. 
Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. 
code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. 
note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': 
certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
compile_config
python
def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| 
Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error)
r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L183-L329
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. 
source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path 
"{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. 
code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. 
note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': 
certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
apply_config
python
def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = 
os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {}
r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L332-L408
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. 
code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. 
note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': 
certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
get_config
python
def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config
Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L411-L448
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. 
code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. 
warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. 
Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. 
code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. 
note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': 
certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
remove_config
python
def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory 
_remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True
Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.remove_config True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L451-L532
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n", "def _remove_fs_obj(path):\n if os.path.exists(path):\n log.info('DSC: Removing %s', path)\n if not __salt__['file.remove'](path):\n error = 'Failed to remove {0}'.format(path)\n log.error('DSC: %s', error)\n raise CommandExecutionError(error)\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. 
code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. 
code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. 
Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. 
Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if 
status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
restore_config
python
def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True
Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.restore_config
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L535-L564
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. 
code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. 
code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. 
reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. 
Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if 
status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
get_config_status
python
def get_config_status():
    '''
    Get the status of the current DSC Configuration

    Returns:
        dict: A dictionary representing the status of the current DSC
            Configuration on the machine

    Raises:
        CommandExecutionError: If the machine has never had a DSC
            configuration applied (raised as ``Not Configured``)

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.get_config_status
    '''
    # Limit output to the useful properties; StartDate is re-formatted on
    # the PowerShell side so it serializes as a short date/time string.
    properties = ('HostName, Status, MetaData, '
                  '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, '
                  'Type, Mode, RebootRequested, NumberofResources')
    cmd = 'Get-DscConfigurationStatus | Select-Object -Property ' + properties
    try:
        return _pshell(cmd, ignore_retcode=True)
    except CommandExecutionError as exc:
        # PowerShell reports this on stderr when no config was ever
        # applied; surface it as a clearer, shorter error for the caller.
        if 'No status information available' in exc.info['stderr']:
            raise CommandExecutionError('Not Configured')
        raise
Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L589-L612
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. 
code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd) def set_lcm_config(config_mode=None, config_mode_freq=None, refresh_freq=None, reboot_if_needed=None, action_after_reboot=None, refresh_mode=None, certificate_id=None, configuration_id=None, allow_module_overwrite=None, debug_mode=False, status_retention_days=None): ''' For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. 
refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly ''' temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR'))) cmd = 'Configuration SaltConfig {' cmd += ' Node localhost {' cmd += ' LocalConfigurationManager {' if config_mode: if config_mode not in ('ApplyOnly', 'ApplyAndMonitor', 'ApplyAndAutoCorrect'): error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \ 'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode) raise SaltInvocationError(error) cmd += ' ConfigurationMode = "{0}";'.format(config_mode) if config_mode_freq: if not isinstance(config_mode_freq, int): error = 'config_mode_freq must be an integer. 
Passed {0}'.format( config_mode_freq ) raise SaltInvocationError(error) cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq) if refresh_mode: if refresh_mode not in ('Disabled', 'Push', 'Pull'): raise SaltInvocationError( 'refresh_mode must be one of Disabled, Push, or Pull' ) cmd += ' RefreshMode = "{0}";'.format(refresh_mode) if refresh_freq: if not isinstance(refresh_freq, int): raise SaltInvocationError('refresh_freq must be an integer') cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq) if reboot_if_needed is not None: if not isinstance(reboot_if_needed, bool): raise SaltInvocationError('reboot_if_needed must be a boolean value') if reboot_if_needed: reboot_if_needed = '$true' else: reboot_if_needed = '$false' cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed) if action_after_reboot: if action_after_reboot not in ('ContinueConfiguration', 'StopConfiguration'): raise SaltInvocationError( 'action_after_reboot must be one of ' 'ContinueConfiguration or StopConfiguration' ) cmd += ' ActionAfterReboot = "{0}"'.format(action_after_reboot) if certificate_id is not None: if certificate_id == '': certificate_id = None cmd += ' CertificateID = "{0}";'.format(certificate_id) if configuration_id is not None: if configuration_id == '': configuration_id = None cmd += ' ConfigurationID = "{0}";'.format(configuration_id) if allow_module_overwrite is not None: if not isinstance(allow_module_overwrite, bool): raise SaltInvocationError('allow_module_overwrite must be a boolean value') if allow_module_overwrite: allow_module_overwrite = '$true' else: allow_module_overwrite = '$false' cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite) if debug_mode is not False: if debug_mode is None: debug_mode = 'None' if debug_mode not in ('None', 'ForceModuleImport', 'All'): raise SaltInvocationError( 'debug_mode must be one of None, ForceModuleImport, ' 'ResourceScriptBreakAll, or All' ) cmd += ' DebugMode = "{0}";'.format(debug_mode) if 
status_retention_days: if not isinstance(status_retention_days, int): raise SaltInvocationError('status_retention_days must be an integer') cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days) cmd += ' }}};' cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir) # Execute Config to create the .mof _pshell(cmd) # Apply the config cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \ r''.format(temp_dir) ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True) __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir)) if not ret['retcode']: log.info('DSC: LCM config applied successfully') return True else: log.error('DSC: Failed to apply LCM config. Error %s', ret) return False
saltstack/salt
salt/modules/win_dsc.py
set_lcm_config
python
def set_lcm_config(config_mode=None,
                   config_mode_freq=None,
                   refresh_freq=None,
                   reboot_if_needed=None,
                   action_after_reboot=None,
                   refresh_mode=None,
                   certificate_id=None,
                   configuration_id=None,
                   allow_module_overwrite=None,
                   debug_mode=False,
                   status_retention_days=None):
    '''
    Set the Local Configuration Manager (LCM) settings by generating,
    compiling, and applying a DSC meta-configuration.

    For detailed descriptions of the parameters see:
    https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig

    config_mode (str): How the LCM applies the configuration. Valid values
        are:

        - ApplyOnly
        - ApplyAndMonitor
        - ApplyAndAutoCorrect

    config_mode_freq (int): How often, in minutes, the current configuration
        is checked and applied. Ignored if config_mode is set to ApplyOnly.
        Default is 15.

    refresh_mode (str): How the LCM gets configurations. Valid values are:

        - Disabled
        - Push
        - Pull

    refresh_freq (int): How often, in minutes, the LCM checks for updated
        configurations. (pull mode only) Default is 30.

    reboot_if_needed (bool): Reboot the machine if needed after a
        configuration is applied. Default is False.

    action_after_reboot (str): Action to take after reboot. Valid values
        are:

        - ContinueConfiguration
        - StopConfiguration

    certificate_id (guid): A GUID that specifies a certificate used to
        access the configuration: (pull mode)

    configuration_id (guid): A GUID that identifies the config file to get
        from a pull server. (pull mode)

    allow_module_overwrite (bool): New configs are allowed to overwrite old
        ones on the target node.

    debug_mode (str): Sets the debug level. Valid values are:

        - None
        - ForceModuleImport
        - ResourceScriptBreakAll
        - All

    status_retention_days (int): Number of days to keep status of the
        current config.

    .. note::
        Either ``config_mode_freq`` or ``refresh_freq`` needs to be a
        multiple of the other. See documentation on MSDN for more details.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.set_lcm_config ApplyOnly
    '''
    temp_dir = os.getenv('TEMP', '{0}\\temp'.format(os.getenv('WINDIR')))
    # Build a PowerShell DSC meta-configuration, adding one setting line per
    # parameter that was actually passed. Each setting ends with ';'.
    cmd = 'Configuration SaltConfig {'
    cmd += ' Node localhost {'
    cmd += ' LocalConfigurationManager {'
    if config_mode:
        if config_mode not in ('ApplyOnly', 'ApplyAndMonitor',
                               'ApplyAndAutoCorrect'):
            error = 'config_mode must be one of ApplyOnly, ApplyAndMonitor, ' \
                    'or ApplyAndAutoCorrect. Passed {0}'.format(config_mode)
            raise SaltInvocationError(error)
        cmd += ' ConfigurationMode = "{0}";'.format(config_mode)
    if config_mode_freq:
        if not isinstance(config_mode_freq, int):
            error = 'config_mode_freq must be an integer. Passed {0}'.format(
                config_mode_freq
            )
            raise SaltInvocationError(error)
        cmd += ' ConfigurationModeFrequencyMins = {0};'.format(config_mode_freq)
    if refresh_mode:
        if refresh_mode not in ('Disabled', 'Push', 'Pull'):
            raise SaltInvocationError(
                'refresh_mode must be one of Disabled, Push, or Pull'
            )
        cmd += ' RefreshMode = "{0}";'.format(refresh_mode)
    if refresh_freq:
        if not isinstance(refresh_freq, int):
            raise SaltInvocationError('refresh_freq must be an integer')
        cmd += ' RefreshFrequencyMins = {0};'.format(refresh_freq)
    if reboot_if_needed is not None:
        if not isinstance(reboot_if_needed, bool):
            raise SaltInvocationError('reboot_if_needed must be a boolean value')
        # PowerShell booleans are $true/$false, not Python's True/False
        if reboot_if_needed:
            reboot_if_needed = '$true'
        else:
            reboot_if_needed = '$false'
        cmd += ' RebootNodeIfNeeded = {0};'.format(reboot_if_needed)
    if action_after_reboot:
        if action_after_reboot not in ('ContinueConfiguration',
                                       'StopConfiguration'):
            raise SaltInvocationError(
                'action_after_reboot must be one of '
                'ContinueConfiguration or StopConfiguration'
            )
        # Fixed: this setting was previously missing its terminating ';'
        cmd += ' ActionAfterReboot = "{0}";'.format(action_after_reboot)
    if certificate_id is not None:
        if certificate_id == '':
            certificate_id = None
        # Only emit the setting when a real ID remains. Previously an empty
        # string resulted in the literal text CertificateID = "None".
        if certificate_id is not None:
            cmd += ' CertificateID = "{0}";'.format(certificate_id)
    if configuration_id is not None:
        if configuration_id == '':
            configuration_id = None
        # Same fix as certificate_id: never write the string "None"
        if configuration_id is not None:
            cmd += ' ConfigurationID = "{0}";'.format(configuration_id)
    if allow_module_overwrite is not None:
        if not isinstance(allow_module_overwrite, bool):
            raise SaltInvocationError('allow_module_overwrite must be a boolean value')
        if allow_module_overwrite:
            allow_module_overwrite = '$true'
        else:
            allow_module_overwrite = '$false'
        cmd += ' AllowModuleOverwrite = {0};'.format(allow_module_overwrite)
    if debug_mode is not False:
        if debug_mode is None:
            debug_mode = 'None'
        # Fixed: ResourceScriptBreakAll is a valid DebugMode per MSDN and was
        # already listed in the error message, but missing from this tuple
        if debug_mode not in ('None', 'ForceModuleImport',
                              'ResourceScriptBreakAll', 'All'):
            raise SaltInvocationError(
                'debug_mode must be one of None, ForceModuleImport, '
                'ResourceScriptBreakAll, or All'
            )
        cmd += ' DebugMode = "{0}";'.format(debug_mode)
    if status_retention_days:
        if not isinstance(status_retention_days, int):
            raise SaltInvocationError('status_retention_days must be an integer')
        cmd += ' StatusRetentionTimeInDays = {0};'.format(status_retention_days)
    # Close LocalConfigurationManager, Node, and Configuration blocks
    cmd += ' }}};'
    cmd += r'SaltConfig -OutputPath "{0}\SaltConfig"'.format(temp_dir)

    # Execute Config to create the .mof
    _pshell(cmd)

    # Apply the config
    cmd = r'Set-DscLocalConfigurationManager -Path "{0}\SaltConfig"' \
          r''.format(temp_dir)
    ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
    __salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir))
    if not ret['retcode']:
        log.info('DSC: LCM config applied successfully')
        return True
    else:
        log.error('DSC: Failed to apply LCM config. Error %s', ret)
        return False
For detailed descriptions of the parameters see: https://msdn.microsoft.com/en-us/PowerShell/DSC/metaConfig config_mode (str): How the LCM applies the configuration. Valid values are: - ApplyOnly - ApplyAndMonitor - ApplyAndAutoCorrect config_mode_freq (int): How often, in minutes, the current configuration is checked and applied. Ignored if config_mode is set to ApplyOnly. Default is 15. refresh_mode (str): How the LCM gets configurations. Valid values are: - Disabled - Push - Pull refresh_freq (int): How often, in minutes, the LCM checks for updated configurations. (pull mode only) Default is 30. reboot_if_needed (bool): Reboot the machine if needed after a configuration is applied. Default is False. action_after_reboot (str): Action to take after reboot. Valid values are: - ContinueConfiguration - StopConfiguration certificate_id (guid): A GUID that specifies a certificate used to access the configuration: (pull mode) configuration_id (guid): A GUID that identifies the config file to get from a pull server. (pull mode) allow_module_overwrite (bool): New configs are allowed to overwrite old ones on the target node. debug_mode (str): Sets the debug level. Valid values are: - None - ForceModuleImport - All status_retention_days (int): Number of days to keep status of the current config. .. note:: Either ``config_mode_freq`` or ``refresh_freq`` needs to be a multiple of the other. See documentation on MSDN for more details. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.set_lcm_config ApplyOnly
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_dsc.py#L637-L801
[ "def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):\n '''\n Execute the desired PowerShell command and ensure that it returns data\n in json format and load that into python. Either return a dict or raise a\n CommandExecutionError.\n '''\n if 'convertto-json' not in cmd.lower():\n cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)\n log.debug('DSC: %s', cmd)\n results = __salt__['cmd.run_all'](\n cmd, shell='powershell', cwd=cwd, python_shell=True,\n ignore_retcode=ignore_retcode)\n\n if 'pid' in results:\n del results['pid']\n\n if 'retcode' not in results or results['retcode'] != 0:\n # run_all logs an error to log.error, fail hard back to the user\n raise CommandExecutionError(\n 'Issue executing PowerShell {0}'.format(cmd), info=results)\n\n # Sometimes Powershell returns an empty string, which isn't valid JSON\n if results['stdout'] == '':\n results['stdout'] = '{}'\n\n try:\n ret = salt.utils.json.loads(results['stdout'], strict=False)\n except ValueError:\n raise CommandExecutionError(\n 'No JSON results from PowerShell', info=results)\n\n log.info('DSC: Returning \"%s\"', ret)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for working with Windows PowerShell DSC (Desired State Configuration) This module is Alpha This module applies DSC Configurations in the form of PowerShell scripts or MOF (Managed Object Format) schema files. Use the ``psget`` module to manage PowerShell resources. The idea is to leverage Salt to push DSC configuration scripts or MOF files to the Minion. :depends: - PowerShell 5.0 ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import logging import os # Import Salt libs import salt.utils.json import salt.utils.platform import salt.utils.versions from salt.exceptions import CommandExecutionError, SaltInvocationError # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'dsc' def __virtual__(): ''' Set the system module of the kernel is Windows ''' # Verify Windows if not salt.utils.platform.is_windows(): log.debug('DSC: Only available on Windows systems') return False, 'DSC: Only available on Windows systems' # Verify PowerShell powershell_info = __salt__['cmd.shell_info']('powershell') if not powershell_info['installed']: log.debug('DSC: Requires PowerShell') return False, 'DSC: Requires PowerShell' # Verify PowerShell 5.0 or greater if salt.utils.versions.compare(powershell_info['version'], '<', '5.0'): log.debug('DSC: Requires PowerShell 5 or later') return False, 'DSC: Requires PowerShell 5 or later' return __virtualname__ def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False): ''' Execute the desired PowerShell command and ensure that it returns data in json format and load that into python. Either return a dict or raise a CommandExecutionError. 
''' if 'convertto-json' not in cmd.lower(): cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth) log.debug('DSC: %s', cmd) results = __salt__['cmd.run_all']( cmd, shell='powershell', cwd=cwd, python_shell=True, ignore_retcode=ignore_retcode) if 'pid' in results: del results['pid'] if 'retcode' not in results or results['retcode'] != 0: # run_all logs an error to log.error, fail hard back to the user raise CommandExecutionError( 'Issue executing PowerShell {0}'.format(cmd), info=results) # Sometimes Powershell returns an empty string, which isn't valid JSON if results['stdout'] == '': results['stdout'] = '{}' try: ret = salt.utils.json.loads(results['stdout'], strict=False) except ValueError: raise CommandExecutionError( 'No JSON results from PowerShell', info=results) log.info('DSC: Returning "%s"', ret) return ret def run_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a DSC Configuration in the form of a PowerShell script (.ps1) and apply it. The PowerShell script can be cached from the master using the ``source`` option. If there is more than one config within the PowerShell script, the desired configuration can be applied by passing the name in the ``config`` option. This command would be the equivalent of running ``dsc.compile_config`` followed by ``dsc.apply_config``. Args: path (str): The local path to the PowerShell script that contains the DSC Configuration. Required. source (str): The path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. 
If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: bool: True if successfully compiled and applied, otherwise False CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. code-block:: bash salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' ret = compile_config(path=path, source=source, config_name=config_name, config_data=config_data, config_data_source=config_data_source, script_parameters=script_parameters, salt_env=salt_env) if ret.get('Exists'): config_path = os.path.dirname(ret['FullName']) return apply_config(config_path) else: return False def compile_config(path, source=None, config_name=None, config_data=None, config_data_source=None, script_parameters=None, salt_env='base'): r''' Compile a config from a PowerShell script (``.ps1``) Args: path (str): Path (local) to the script that will create the ``.mof`` configuration file. If no source is passed, the file must exist locally. Required. 
source (str): Path to the script on ``file_roots`` to cache at the location specified by ``path``. The source file will be cached locally and then executed. If source is not passed, the config script located at ``path`` will be compiled. Optional. config_name (str): The name of the Configuration within the script to apply. If the script contains multiple configurations within the file a ``config_name`` must be specified. If the ``config_name`` is not specified, the name of the file will be used as the ``config_name`` to run. Optional. config_data (str): Configuration data in the form of a hash table that will be passed to the ``ConfigurationData`` parameter when the ``config_name`` is compiled. This can be the path to a ``.psd1`` file containing the proper hash table or the PowerShell code to create the hash table. .. versionadded:: 2017.7.0 config_data_source (str): The path to the ``.psd1`` file on ``file_roots`` to cache at the location specified by ``config_data``. If this is specified, ``config_data`` must be a local path instead of a hash table. .. versionadded:: 2017.7.0 script_parameters (str): Any additional parameters expected by the configuration script. These must be defined in the script itself. .. versionadded:: 2017.7.0 salt_env (str): The salt environment to use when copying the source. Default is 'base' Returns: dict: A dictionary containing the results of the compilation CLI Example: To compile a config from a script that already exists on the system: .. code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 To cache a config script to the system from the master and compile it: .. 
code-block:: bash salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1 ''' if source: log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_file'](path=source, dest=path, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_data_source: log.info('DSC: Caching %s', config_data_source) cached_files = __salt__['cp.get_file'](path=config_data_source, dest=config_data, saltenv=salt_env, makedirs=True) if not cached_files: error = 'Failed to cache {0}'.format(config_data_source) log.error('DSC: %s', error) raise CommandExecutionError(error) # Make sure the path exists if not os.path.exists(path): error = '"{0}" not found'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) if config_name is None: # If the name of the config isn't passed, make it the name of the .ps1 config_name = os.path.splitext(os.path.basename(path))[0] cwd = os.path.dirname(path) # Run the script and see if the compile command is in the script cmd = [path] # Add any script parameters if script_parameters: cmd.append(script_parameters) # Select fields to return cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' '.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret # If you get to this point, the script did not contain a compile command # dot source the script to compile the state and generate the mof file cmd = ['.', path] if script_parameters: cmd.append(script_parameters) cmd.extend([';', config_name]) if config_data: cmd.append(config_data) cmd.append('| Select-Object -Property FullName, Extension, Exists, ' '@{Name="LastWriteTime";Expression={Get-Date ($_.LastWriteTime) ' '-Format g}}') cmd = ' 
'.join(cmd) ret = _pshell(cmd, cwd) if ret: # Script compiled, return results if ret.get('Exists'): log.info('DSC: Compile Config: %s', ret) return ret error = 'Failed to compile config: {0}'.format(path) error += '\nReturned: {0}'.format(ret) log.error('DSC: %s', error) raise CommandExecutionError(error) def apply_config(path, source=None, salt_env='base'): r''' Run an compiled DSC configuration (a folder containing a .mof file). The folder can be cached from the salt master using the ``source`` option. Args: path (str): Local path to the directory that contains the .mof configuration file to apply. Required. source (str): Path to the directory that contains the .mof file on the ``file_roots``. The source directory will be copied to the path directory and then executed. If the path and source directories differ, the source directory will be applied. If source is not passed, the config located at ``path`` will be applied. Optional. salt_env (str): The salt environment to use when copying your source. Default is 'base' Returns: bool: True if successful, otherwise False CLI Example: To apply a config that already exists on the the system .. code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration To cache a configuration from the master and apply it: .. 
code-block:: bash salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration ''' # If you're getting an error along the lines of "The client cannot connect # to the destination specified in the request.", try the following: # Enable-PSRemoting -SkipNetworkProfileCheck config = path if source: # Make sure the folder names match path_name = os.path.basename(os.path.normpath(path)) source_name = os.path.basename(os.path.normpath(source)) if path_name.lower() != source_name.lower(): # Append the Source name to the Path path = '{0}\\{1}'.format(path, source_name) log.debug('DSC: %s appended to the path.', source_name) # Destination path minus the basename dest_path = os.path.dirname(os.path.normpath(path)) log.info('DSC: Caching %s', source) cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env) if not cached_files: error = 'Failed to copy {0}'.format(source) log.error('DSC: %s', error) raise CommandExecutionError(error) else: config = os.path.dirname(cached_files[0]) # Make sure the path exists if not os.path.exists(config): error = '{0} not found'.format(config) log.error('DSC: %s', error) raise CommandExecutionError(error) # Run the DSC Configuration # Putting quotes around the parameter protects against command injection cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config) _pshell(cmd) cmd = '$status = Get-DscConfigurationStatus; $status.Status' ret = _pshell(cmd) log.info('DSC: Apply Config: %s', ret) return ret == 'Success' or ret == {} def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config def remove_config(reset=False): ''' Remove the current DSC Configuration. Removes current, pending, and previous dsc configurations. .. versionadded:: 2017.7.5 Args: reset (bool): Attempts to reset the DSC configuration by removing the following from ``C:\\Windows\\System32\\Configuration``: - File: DSCStatusHistory.mof - File: DSCEngineCache.mof - Dir: ConfigurationStatus Default is False .. warning:: ``remove_config`` may fail to reset the DSC environment if any of the files in the ``ConfigurationStatus`` directory. If you wait a few minutes and run again, it may complete successfully. Returns: bool: True if successful Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.remove_config True ''' # Stopping a running config (not likely to occur) cmd = 'Stop-DscConfiguration' log.info('DSC: Stopping Running Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) # Remove configuration files cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \ '-Force' log.info('DSC: Removing Configuration') try: _pshell(cmd) except CommandExecutionError as exc: if exc.info['retcode'] != 0: raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info) log.info('DSC: %s', exc.info['stdout']) if not reset: return True def _remove_fs_obj(path): if os.path.exists(path): log.info('DSC: Removing %s', path) if not __salt__['file.remove'](path): error = 'Failed to remove {0}'.format(path) log.error('DSC: %s', error) raise CommandExecutionError(error) dsc_config_dir = '{0}\\System32\\Configuration' \ ''.format(os.getenv('SystemRoot', 'C:\\Windows')) # Remove History _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir)) # Remove Engine Cache _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir)) # Remove Status Directory _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir)) return True def restore_config(): ''' Reapplies the previous configuration. .. versionadded:: 2017.7.5 .. note:: The current configuration will be come the previous configuration. If run a second time back-to-back it is like toggling between two configs. Returns: bool: True if successfully restored Raises: CommandExecutionError: On failure CLI Example: .. 
code-block:: bash salt '*' dsc.restore_config ''' cmd = 'Restore-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'A previous configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Previous Configuration Not Found') raise return True def test_config(): ''' Tests the current applied DSC Configuration Returns: bool: True if successfully applied, otherwise False CLI Example: .. code-block:: bash salt '*' dsc.test_config ''' cmd = 'Test-DscConfiguration' try: _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_config_status(): ''' Get the status of the current DSC Configuration Returns: dict: A dictionary representing the status of the current DSC Configuration on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_config_status ''' cmd = 'Get-DscConfigurationStatus | ' \ 'Select-Object -Property HostName, Status, MetaData, ' \ '@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \ 'Type, Mode, RebootRequested, NumberofResources' try: return _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'No status information available' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise def get_lcm_config(): ''' Get the current Local Configuration Manager settings Returns: dict: A dictionary representing the Local Configuration Manager settings on the machine CLI Example: .. code-block:: bash salt '*' dsc.get_lcm_config ''' cmd = 'Get-DscLocalConfigurationManager | ' \ 'Select-Object -Property ConfigurationModeFrequencyMins, LCMState, ' \ 'RebootNodeIfNeeded, ConfigurationMode, ActionAfterReboot, ' \ 'RefreshMode, CertificateID, ConfigurationID, RefreshFrequencyMins, ' \ 'AllowModuleOverwrite, DebugMode, StatusRetentionTimeInDays ' return _pshell(cmd)
saltstack/salt
salt/modules/sensors.py
sense
python
def sense(chip, fahrenheit=False):
    '''
    Gather lm-sensors data from a given chip

    To determine the chip to query, use the 'sensors' command
    and see the leading line in the block.

    Example:

    /usr/bin/sensors

    coretemp-isa-0000
    Adapter: ISA adapter
    Physical id 0:  +56.0°C  (high = +87.0°C, crit = +105.0°C)
    Core 0:         +52.0°C  (high = +87.0°C, crit = +105.0°C)
    Core 1:         +50.0°C  (high = +87.0°C, crit = +105.0°C)
    Core 2:         +56.0°C  (high = +87.0°C, crit = +105.0°C)
    Core 3:         +53.0°C  (high = +87.0°C, crit = +105.0°C)

    Given the above, the chip is 'coretemp-isa-0000'.
    '''
    # '-f' switches the sensors output to Fahrenheit
    extra_args = '-f' if fahrenheit is True else ''
    output = __salt__['cmd.run'](
        '/usr/bin/sensors {0} {1}'.format(chip, extra_args),
        python_shell=False)

    readings = {}
    for line in output.splitlines():
        # Each reading looks like "Label: value ..."; keep only lines
        # that actually contain a colon-separated pair.
        fields = line.split(':')
        if len(fields) >= 2:
            readings[fields[0]] = fields[1].lstrip()
    return readings
Gather lm-sensors data from a given chip To determine the chip to query, use the 'sensors' command and see the leading line in the block. Example: /usr/bin/sensors coretemp-isa-0000 Adapter: ISA adapter Physical id 0: +56.0°C (high = +87.0°C, crit = +105.0°C) Core 0: +52.0°C (high = +87.0°C, crit = +105.0°C) Core 1: +50.0°C (high = +87.0°C, crit = +105.0°C) Core 2: +56.0°C (high = +87.0°C, crit = +105.0°C) Core 3: +53.0°C (high = +87.0°C, crit = +105.0°C) Given the above, the chip is 'coretemp-isa-0000'.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sensors.py#L25-L55
null
# -*- coding: utf-8 -*- ''' Read lm-sensors .. versionadded:: 2014.1.3 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging # import Salt libs import salt.utils.path log = logging.getLogger(__name__) def __virtual__(): if salt.utils.path.which('sensors'): return True return (False, 'sensors does not exist in the path')
saltstack/salt
salt/states/influxdb_retention_policy.py
convert_duration
python
def convert_duration(duration):
    '''
    Convert a duration string into XXhYYmZZs format

    duration
        Duration to convert. Must be expressed in hours (``h``),
        days (``d``) or weeks (``w``), e.g. ``'24h'``, ``'7d'``, ``'1w'``.

    Returns: duration_string
        String representation of duration in XXhYYmZZs format

    Raises:
        ValueError: if ``duration`` does not end in 'h', 'd' or 'w'.
    '''
    # Durations must be specified in hours, days or weeks; normalize all of
    # them to whole hours. The previous implementation called
    # int(duration.split('h')) for the 'h' case, which passed a *list* to
    # int() and raised TypeError for every hour-based duration; an unknown
    # suffix left `hours` unbound and raised NameError.
    if duration.endswith('h'):
        hours = int(duration[:-1])
    elif duration.endswith('d'):
        hours = int(duration[:-1]) * 24
    elif duration.endswith('w'):
        hours = int(duration[:-1]) * 24 * 7
    else:
        raise ValueError(
            'Invalid duration {0!r}: must end in h, d or w'.format(duration))

    duration_string = str(hours) + 'h0m0s'

    return duration_string
Convert a duration string into XXhYYmZZs format duration Duration to convert Returns: duration_string String representation of duration in XXhYYmZZs format
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/influxdb_retention_policy.py#L24-L49
null
# -*- coding: utf-8 -*- ''' Management of Influxdb retention policies ========================================= .. versionadded:: 2017.7.0 (compatible with InfluxDB version 0.9+) ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def __virtual__(): ''' Only load if the influxdb module is available ''' if 'influxdb.db_exists' in __salt__: return 'influxdb_retention_policy' return False def present(name, database, duration="7d", replication=1, default=False, **client_args): ''' Ensure that given retention policy is present. name Name of the retention policy to create. database Database to create retention policy on. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'retention policy {0} is already present'.format(name)} if not __salt__['influxdb.retention_policy_exists'](name=name, database=database, **client_args): if __opts__['test']: ret['result'] = None ret['comment'] = ' {0} is absent and will be created'\ .format(name) return ret if __salt__['influxdb.create_retention_policy']( database, name, duration, replication, default, **client_args ): ret['comment'] = 'retention policy {0} has been created'\ .format(name) ret['changes'][name] = 'Present' return ret else: ret['comment'] = 'Failed to create retention policy {0}'\ .format(name) ret['result'] = False return ret else: current_policy = __salt__['influxdb.get_retention_policy'](database=database, name=name) update_policy = False if current_policy['duration'] != convert_duration(duration): update_policy = True ret['changes']['duration'] = "Retention changed from {0} to {1}.".format(current_policy['duration'], duration) if current_policy['replicaN'] != replication: update_policy = True ret['changes']['replication'] = "Replication changed from {0} to {1}.".format(current_policy['replicaN'], replication) if current_policy['default'] != default: update_policy = True ret['changes']['default'] = "Default changed from {0} to 
{1}.".format(current_policy['default'], default) if update_policy: if __opts__['test']: ret['result'] = None ret['comment'] = ' {0} is present and set to be changed'\ .format(name) return ret else: if __salt__['influxdb.alter_retention_policy']( database, name, duration, replication, default, **client_args ): ret['comment'] = 'retention policy {0} has been changed'\ .format(name) return ret else: ret['comment'] = 'Failed to update retention policy {0}'\ .format(name) ret['result'] = False return ret return ret def absent(name, database, **client_args): ''' Ensure that given retention policy is absent. name Name of the retention policy to remove. database Name of the database that the retention policy was defined on. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'retention policy {0} is not present'.format(name)} if __salt__['influxdb.retention_policy_exists'](database, name, **client_args): if __opts__['test']: ret['result'] = None ret['comment'] = ( 'retention policy {0} is present and needs to be removed' ).format(name) return ret if __salt__['influxdb.drop_retention_policy'](database, name, **client_args): ret['comment'] = 'retention policy {0} has been removed'\ .format(name) ret['changes'][name] = 'Absent' return ret else: ret['comment'] = 'Failed to remove retention policy {0}'\ .format(name) ret['result'] = False return ret return ret
saltstack/salt
salt/states/influxdb_retention_policy.py
present
python
def present(name, database, duration="7d", replication=1, default=False, **client_args): ''' Ensure that given retention policy is present. name Name of the retention policy to create. database Database to create retention policy on. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'retention policy {0} is already present'.format(name)} if not __salt__['influxdb.retention_policy_exists'](name=name, database=database, **client_args): if __opts__['test']: ret['result'] = None ret['comment'] = ' {0} is absent and will be created'\ .format(name) return ret if __salt__['influxdb.create_retention_policy']( database, name, duration, replication, default, **client_args ): ret['comment'] = 'retention policy {0} has been created'\ .format(name) ret['changes'][name] = 'Present' return ret else: ret['comment'] = 'Failed to create retention policy {0}'\ .format(name) ret['result'] = False return ret else: current_policy = __salt__['influxdb.get_retention_policy'](database=database, name=name) update_policy = False if current_policy['duration'] != convert_duration(duration): update_policy = True ret['changes']['duration'] = "Retention changed from {0} to {1}.".format(current_policy['duration'], duration) if current_policy['replicaN'] != replication: update_policy = True ret['changes']['replication'] = "Replication changed from {0} to {1}.".format(current_policy['replicaN'], replication) if current_policy['default'] != default: update_policy = True ret['changes']['default'] = "Default changed from {0} to {1}.".format(current_policy['default'], default) if update_policy: if __opts__['test']: ret['result'] = None ret['comment'] = ' {0} is present and set to be changed'\ .format(name) return ret else: if __salt__['influxdb.alter_retention_policy']( database, name, duration, replication, default, **client_args ): ret['comment'] = 'retention policy {0} has been changed'\ .format(name) return ret else: ret['comment'] = 'Failed to update retention policy {0}'\ 
.format(name) ret['result'] = False return ret return ret
Ensure that given retention policy is present. name Name of the retention policy to create. database Database to create retention policy on.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/influxdb_retention_policy.py#L52-L126
[ "def convert_duration(duration):\n '''\n Convert the a duration string into XXhYYmZZs format\n\n duration\n Duration to convert\n\n Returns: duration_string\n String representation of duration in XXhYYmZZs format\n '''\n\n # durations must be specified in days, weeks or hours\n\n if duration.endswith('h'):\n hours = int(duration.split('h'))\n\n elif duration.endswith('d'):\n days = duration.split('d')\n hours = int(days[0]) * 24\n\n elif duration.endswith('w'):\n weeks = duration.split('w')\n hours = int(weeks[0]) * 24 * 7\n\n duration_string = str(hours)+'h0m0s'\n return duration_string\n" ]
# -*- coding: utf-8 -*- ''' Management of Influxdb retention policies ========================================= .. versionadded:: 2017.7.0 (compatible with InfluxDB version 0.9+) ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals def __virtual__(): ''' Only load if the influxdb module is available ''' if 'influxdb.db_exists' in __salt__: return 'influxdb_retention_policy' return False def convert_duration(duration): ''' Convert the a duration string into XXhYYmZZs format duration Duration to convert Returns: duration_string String representation of duration in XXhYYmZZs format ''' # durations must be specified in days, weeks or hours if duration.endswith('h'): hours = int(duration.split('h')) elif duration.endswith('d'): days = duration.split('d') hours = int(days[0]) * 24 elif duration.endswith('w'): weeks = duration.split('w') hours = int(weeks[0]) * 24 * 7 duration_string = str(hours)+'h0m0s' return duration_string def absent(name, database, **client_args): ''' Ensure that given retention policy is absent. name Name of the retention policy to remove. database Name of the database that the retention policy was defined on. ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'retention policy {0} is not present'.format(name)} if __salt__['influxdb.retention_policy_exists'](database, name, **client_args): if __opts__['test']: ret['result'] = None ret['comment'] = ( 'retention policy {0} is present and needs to be removed' ).format(name) return ret if __salt__['influxdb.drop_retention_policy'](database, name, **client_args): ret['comment'] = 'retention policy {0} has been removed'\ .format(name) ret['changes'][name] = 'Absent' return ret else: ret['comment'] = 'Failed to remove retention policy {0}'\ .format(name) ret['result'] = False return ret return ret
saltstack/salt
salt/modules/event.py
_dict_subset
python
def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
Return a dictionary of only the subset of keys/values specified in keys
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L27-L31
null
# -*- coding: utf-8 -*- ''' Use the :ref:`Salt Event System <events>` to fire events from the master to the minion and vice-versa. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import collections import logging import os import sys import traceback # Import salt libs import salt.crypt import salt.utils.event import salt.utils.zeromq import salt.payload import salt.transport.client from salt.ext import six __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def fire_master(data, tag, preload=None, timeout=60): ''' Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag' ''' if (__opts__.get('local', None) or __opts__.get('file_client', None) == 'local') and not __opts__.get('use_master_when_local', False): # We can't send an event if we're in masterless mode log.warning('Local mode detected. Event with tag %s will NOT be sent.', tag) return False if preload or __opts__.get('__cli') == 'salt-call': # If preload is specified, we must send a raw event (this is # slower because it has to independently authenticate) if 'master_uri' not in __opts__: __opts__['master_uri'] = 'tcp://{ip}:{port}'.format( ip=salt.utils.zeromq.ip_bracket(__opts__['interface']), port=__opts__.get('ret_port', '4506') # TODO, no fallback ) masters = list() ret = None if 'master_uri_list' in __opts__: for master_uri in __opts__['master_uri_list']: masters.append(master_uri) else: masters.append(__opts__['master_uri']) auth = salt.crypt.SAuth(__opts__) load = {'id': __opts__['id'], 'tag': tag, 'data': data, 'tok': auth.gen_token(b'salt'), 'cmd': '_minion_event'} if isinstance(preload, dict): load.update(preload) for master in masters: channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master) try: channel.send(load, timeout=timeout) # channel.send was successful. # Ensure ret is True. 
ret = True except Exception: # only set a False ret if it hasn't been sent atleast once if ret is None: ret = False finally: channel.close() return ret else: # Usually, we can send the event via the minion, which is faster # because it is already authenticated try: me = salt.utils.event.MinionEvent(__opts__, listen=False, keep_loop=True) return me.fire_event({'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master') except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def fire(data, tag, timeout=None): ''' Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag' ''' if timeout is None: timeout = 60000 else: timeout = timeout * 1000 try: event = salt.utils.event.get_event(__opts__.get('__role', 'minion'), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], opts=__opts__, keep_loop=True, listen=False) return event.fire_event(data, tag, timeout=timeout) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def send(tag, data=None, preload=None, with_env=False, with_grains=False, with_pillar=False, with_env_opts=False, timeout=60, **kwargs): ''' Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. Arguments on the CLI are interpreted as YAML so complex data structures are possible. 
:param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. :type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. :type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param timeout: maximum duration to wait to connect to Salt's IPCMessageServer in seconds. Defaults to 60s :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. 
The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env='[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]' ''' data_dict = {} if with_env: if isinstance(with_env, list): data_dict['environ'] = _dict_subset(with_env, dict(os.environ)) else: data_dict['environ'] = dict(os.environ) if with_grains: if isinstance(with_grains, list): data_dict['grains'] = _dict_subset(with_grains, __grains__) else: data_dict['grains'] = __grains__ if with_pillar: if isinstance(with_pillar, list): data_dict['pillar'] = _dict_subset(with_pillar, __pillar__) else: data_dict['pillar'] = __pillar__ if with_env_opts: data_dict['saltenv'] = __opts__.get('saltenv', 'base') data_dict['pillarenv'] = __opts__.get('pillarenv') if kwargs: data_dict.update(kwargs) # Allow values in the ``data`` arg to override any of the above values. if isinstance(data, collections.Mapping): data_dict.update(data) if __opts__.get('local') or __opts__.get('file_client') == 'local' or __opts__.get('master_type') == 'disable': return fire(data_dict, tag, timeout=timeout) else: return fire_master(data_dict, tag, preload=preload, timeout=timeout)
saltstack/salt
salt/modules/event.py
fire_master
python
def fire_master(data, tag, preload=None, timeout=60): ''' Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag' ''' if (__opts__.get('local', None) or __opts__.get('file_client', None) == 'local') and not __opts__.get('use_master_when_local', False): # We can't send an event if we're in masterless mode log.warning('Local mode detected. Event with tag %s will NOT be sent.', tag) return False if preload or __opts__.get('__cli') == 'salt-call': # If preload is specified, we must send a raw event (this is # slower because it has to independently authenticate) if 'master_uri' not in __opts__: __opts__['master_uri'] = 'tcp://{ip}:{port}'.format( ip=salt.utils.zeromq.ip_bracket(__opts__['interface']), port=__opts__.get('ret_port', '4506') # TODO, no fallback ) masters = list() ret = None if 'master_uri_list' in __opts__: for master_uri in __opts__['master_uri_list']: masters.append(master_uri) else: masters.append(__opts__['master_uri']) auth = salt.crypt.SAuth(__opts__) load = {'id': __opts__['id'], 'tag': tag, 'data': data, 'tok': auth.gen_token(b'salt'), 'cmd': '_minion_event'} if isinstance(preload, dict): load.update(preload) for master in masters: channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master) try: channel.send(load, timeout=timeout) # channel.send was successful. # Ensure ret is True. 
ret = True except Exception: # only set a False ret if it hasn't been sent atleast once if ret is None: ret = False finally: channel.close() return ret else: # Usually, we can send the event via the minion, which is faster # because it is already authenticated try: me = salt.utils.event.MinionEvent(__opts__, listen=False, keep_loop=True) return me.fire_event({'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master') except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False
Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L34-L98
[ "def ip_bracket(addr):\n '''\n Convert IP address representation to ZMQ (URL) format. ZMQ expects\n brackets around IPv6 literals, since they are used in URLs.\n '''\n addr = ipaddress.ip_address(addr)\n return ('[{}]' if addr.version == 6 else '{}').format(addr)\n", "def factory(opts, **kwargs):\n # All Sync interfaces are just wrappers around the Async ones\n sync = SyncWrapper(AsyncReqChannel.factory, (opts,), kwargs)\n return sync\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. 
The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n" ]
# -*- coding: utf-8 -*- ''' Use the :ref:`Salt Event System <events>` to fire events from the master to the minion and vice-versa. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import collections import logging import os import sys import traceback # Import salt libs import salt.crypt import salt.utils.event import salt.utils.zeromq import salt.payload import salt.transport.client from salt.ext import six __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys]) def fire(data, tag, timeout=None): ''' Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag' ''' if timeout is None: timeout = 60000 else: timeout = timeout * 1000 try: event = salt.utils.event.get_event(__opts__.get('__role', 'minion'), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], opts=__opts__, keep_loop=True, listen=False) return event.fire_event(data, tag, timeout=timeout) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def send(tag, data=None, preload=None, with_env=False, with_grains=False, with_pillar=False, with_env_opts=False, timeout=60, **kwargs): ''' Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. 
Arguments on the CLI are interpreted as YAML so complex data structures are possible. :param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. :type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. :type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param timeout: maximum duration to wait to connect to Salt's IPCMessageServer in seconds. Defaults to 60s :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. 
The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env='[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]' ''' data_dict = {} if with_env: if isinstance(with_env, list): data_dict['environ'] = _dict_subset(with_env, dict(os.environ)) else: data_dict['environ'] = dict(os.environ) if with_grains: if isinstance(with_grains, list): data_dict['grains'] = _dict_subset(with_grains, __grains__) else: data_dict['grains'] = __grains__ if with_pillar: if isinstance(with_pillar, list): data_dict['pillar'] = _dict_subset(with_pillar, __pillar__) else: data_dict['pillar'] = __pillar__ if with_env_opts: data_dict['saltenv'] = __opts__.get('saltenv', 'base') data_dict['pillarenv'] = __opts__.get('pillarenv') if kwargs: data_dict.update(kwargs) # Allow values in the ``data`` arg to override any of the above values. if isinstance(data, collections.Mapping): data_dict.update(data) if __opts__.get('local') or __opts__.get('file_client') == 'local' or __opts__.get('master_type') == 'disable': return fire(data_dict, tag, timeout=timeout) else: return fire_master(data_dict, tag, preload=preload, timeout=timeout)
saltstack/salt
salt/modules/event.py
fire
python
def fire(data, tag, timeout=None): ''' Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag' ''' if timeout is None: timeout = 60000 else: timeout = timeout * 1000 try: event = salt.utils.event.get_event(__opts__.get('__role', 'minion'), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], opts=__opts__, keep_loop=True, listen=False) return event.fire_event(data, tag, timeout=timeout) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False
Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L101-L128
[ "def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. 
The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n" ]
# -*- coding: utf-8 -*- ''' Use the :ref:`Salt Event System <events>` to fire events from the master to the minion and vice-versa. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import collections import logging import os import sys import traceback # Import salt libs import salt.crypt import salt.utils.event import salt.utils.zeromq import salt.payload import salt.transport.client from salt.ext import six __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys]) def fire_master(data, tag, preload=None, timeout=60): ''' Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag' ''' if (__opts__.get('local', None) or __opts__.get('file_client', None) == 'local') and not __opts__.get('use_master_when_local', False): # We can't send an event if we're in masterless mode log.warning('Local mode detected. 
Event with tag %s will NOT be sent.', tag) return False if preload or __opts__.get('__cli') == 'salt-call': # If preload is specified, we must send a raw event (this is # slower because it has to independently authenticate) if 'master_uri' not in __opts__: __opts__['master_uri'] = 'tcp://{ip}:{port}'.format( ip=salt.utils.zeromq.ip_bracket(__opts__['interface']), port=__opts__.get('ret_port', '4506') # TODO, no fallback ) masters = list() ret = None if 'master_uri_list' in __opts__: for master_uri in __opts__['master_uri_list']: masters.append(master_uri) else: masters.append(__opts__['master_uri']) auth = salt.crypt.SAuth(__opts__) load = {'id': __opts__['id'], 'tag': tag, 'data': data, 'tok': auth.gen_token(b'salt'), 'cmd': '_minion_event'} if isinstance(preload, dict): load.update(preload) for master in masters: channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master) try: channel.send(load, timeout=timeout) # channel.send was successful. # Ensure ret is True. ret = True except Exception: # only set a False ret if it hasn't been sent atleast once if ret is None: ret = False finally: channel.close() return ret else: # Usually, we can send the event via the minion, which is faster # because it is already authenticated try: me = salt.utils.event.MinionEvent(__opts__, listen=False, keep_loop=True) return me.fire_event({'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master') except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def send(tag, data=None, preload=None, with_env=False, with_grains=False, with_pillar=False, with_env_opts=False, timeout=60, **kwargs): ''' Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. 
E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. Arguments on the CLI are interpreted as YAML so complex data structures are possible. :param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. :type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. :type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param timeout: maximum duration to wait to connect to Salt's IPCMessageServer in seconds. Defaults to 60s :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. 
This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env='[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]' ''' data_dict = {} if with_env: if isinstance(with_env, list): data_dict['environ'] = _dict_subset(with_env, dict(os.environ)) else: data_dict['environ'] = dict(os.environ) if with_grains: if isinstance(with_grains, list): data_dict['grains'] = _dict_subset(with_grains, __grains__) else: data_dict['grains'] = __grains__ if with_pillar: if isinstance(with_pillar, list): data_dict['pillar'] = _dict_subset(with_pillar, __pillar__) else: data_dict['pillar'] = __pillar__ if with_env_opts: data_dict['saltenv'] = __opts__.get('saltenv', 'base') data_dict['pillarenv'] = __opts__.get('pillarenv') if kwargs: data_dict.update(kwargs) # Allow values in the ``data`` arg to override any of the above values. if isinstance(data, collections.Mapping): data_dict.update(data) if __opts__.get('local') or __opts__.get('file_client') == 'local' or __opts__.get('master_type') == 'disable': return fire(data_dict, tag, timeout=timeout) else: return fire_master(data_dict, tag, preload=preload, timeout=timeout)
saltstack/salt
salt/modules/event.py
send
python
def send(tag, data=None, preload=None, with_env=False, with_grains=False, with_pillar=False, with_env_opts=False, timeout=60, **kwargs): ''' Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. Arguments on the CLI are interpreted as YAML so complex data structures are possible. :param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. :type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. 
:type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param timeout: maximum duration to wait to connect to Salt's IPCMessageServer in seconds. Defaults to 60s :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env='[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]' ''' data_dict = {} if with_env: if isinstance(with_env, list): data_dict['environ'] = _dict_subset(with_env, dict(os.environ)) else: data_dict['environ'] = dict(os.environ) if with_grains: if isinstance(with_grains, list): data_dict['grains'] = _dict_subset(with_grains, __grains__) else: data_dict['grains'] = __grains__ if with_pillar: if isinstance(with_pillar, list): data_dict['pillar'] = _dict_subset(with_pillar, __pillar__) else: data_dict['pillar'] = __pillar__ if with_env_opts: data_dict['saltenv'] = __opts__.get('saltenv', 'base') data_dict['pillarenv'] = __opts__.get('pillarenv') if kwargs: data_dict.update(kwargs) # Allow values in the ``data`` arg to override any of the above values. 
if isinstance(data, collections.Mapping): data_dict.update(data) if __opts__.get('local') or __opts__.get('file_client') == 'local' or __opts__.get('master_type') == 'disable': return fire(data_dict, tag, timeout=timeout) else: return fire_master(data_dict, tag, preload=preload, timeout=timeout)
Send an event to the Salt Master .. versionadded:: 2014.7.0 :param tag: A tag to give the event. Use slashes to create a namespace for related events. E.g., ``myco/build/buildserver1/start``, ``myco/build/buildserver1/success``, ``myco/build/buildserver1/failure``. :param data: A dictionary of data to send in the event. This is free-form. Send any data points that are needed for whoever is consuming the event. Arguments on the CLI are interpreted as YAML so complex data structures are possible. :param with_env: Include environment variables from the current shell environment in the event data as ``environ``.. This is a short-hand for working with systems that seed the environment with relevant data such as Jenkins. :type with_env: Specify ``True`` to include all environment variables, or specify a list of strings of variable names to include. :param with_grains: Include grains from the current minion in the event data as ``grains``. :type with_grains: Specify ``True`` to include all grains, or specify a list of strings of grain names to include. :param with_pillar: Include Pillar values from the current minion in the event data as ``pillar``. Remember Pillar data is often sensitive data so be careful. This is useful for passing ephemeral Pillar values through an event. Such as passing the ``pillar={}`` kwarg in :py:func:`state.sls <salt.modules.state.sls>` from the Master, through an event on the Minion, then back to the Master. :type with_pillar: Specify ``True`` to include all Pillar values, or specify a list of strings of Pillar keys to include. It is a best-practice to only specify a relevant subset of Pillar data. :param with_env_opts: Include ``saltenv`` and ``pillarenv`` set on minion at the moment when event is send into event data. :type with_env_opts: Specify ``True`` to include ``saltenv`` and ``pillarenv`` values or ``False`` to omit them. :param timeout: maximum duration to wait to connect to Salt's IPCMessageServer in seconds. 
Defaults to 60s :param kwargs: Any additional keyword arguments passed to this function will be interpreted as key-value pairs and included in the event data. This provides a convenient alternative to YAML for simple values. CLI Example: .. code-block:: bash salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The following rule in sudoers will allow the ``jenkins`` user to run only the following command. ``/etc/sudoers`` (allow preserving the environment): .. code-block:: text jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* Call Jenkins via sudo (preserve the environment): .. code-block:: bash sudo -E salt-call event.send myco/jenkins/build/success with_env='[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT]'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L131-L247
[ "def fire(data, tag, timeout=None):\n '''\n Fire an event on the local minion event bus. Data must be formed as a dict.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' event.fire '{\"data\":\"my event data\"}' 'tag'\n '''\n if timeout is None:\n timeout = 60000\n else:\n timeout = timeout * 1000\n try:\n event = salt.utils.event.get_event(__opts__.get('__role', 'minion'),\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport'],\n opts=__opts__,\n keep_loop=True,\n listen=False)\n\n return event.fire_event(data, tag, timeout=timeout)\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n log.debug(lines)\n return False\n", "def fire_master(data, tag, preload=None, timeout=60):\n '''\n Fire an event off up to the master server\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' event.fire_master '{\"data\":\"my event data\"}' 'tag'\n '''\n if (__opts__.get('local', None) or __opts__.get('file_client', None) == 'local') and not __opts__.get('use_master_when_local', False):\n # We can't send an event if we're in masterless mode\n log.warning('Local mode detected. 
Event with tag %s will NOT be sent.', tag)\n return False\n\n if preload or __opts__.get('__cli') == 'salt-call':\n # If preload is specified, we must send a raw event (this is\n # slower because it has to independently authenticate)\n if 'master_uri' not in __opts__:\n __opts__['master_uri'] = 'tcp://{ip}:{port}'.format(\n ip=salt.utils.zeromq.ip_bracket(__opts__['interface']),\n port=__opts__.get('ret_port', '4506') # TODO, no fallback\n )\n masters = list()\n ret = None\n if 'master_uri_list' in __opts__:\n for master_uri in __opts__['master_uri_list']:\n masters.append(master_uri)\n else:\n masters.append(__opts__['master_uri'])\n auth = salt.crypt.SAuth(__opts__)\n load = {'id': __opts__['id'],\n 'tag': tag,\n 'data': data,\n 'tok': auth.gen_token(b'salt'),\n 'cmd': '_minion_event'}\n\n if isinstance(preload, dict):\n load.update(preload)\n\n for master in masters:\n channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)\n try:\n channel.send(load, timeout=timeout)\n # channel.send was successful.\n # Ensure ret is True.\n ret = True\n except Exception:\n # only set a False ret if it hasn't been sent atleast once\n if ret is None:\n ret = False\n finally:\n channel.close()\n return ret\n else:\n # Usually, we can send the event via the minion, which is faster\n # because it is already authenticated\n try:\n me = salt.utils.event.MinionEvent(__opts__, listen=False, keep_loop=True)\n return me.fire_event({'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master')\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n log.debug(lines)\n return False\n", "def _dict_subset(keys, master_dict):\n '''\n Return a dictionary of only the subset of keys/values specified in keys\n '''\n return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])\n" ]
# -*- coding: utf-8 -*- ''' Use the :ref:`Salt Event System <events>` to fire events from the master to the minion and vice-versa. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import collections import logging import os import sys import traceback # Import salt libs import salt.crypt import salt.utils.event import salt.utils.zeromq import salt.payload import salt.transport.client from salt.ext import six __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys]) def fire_master(data, tag, preload=None, timeout=60): ''' Fire an event off up to the master server CLI Example: .. code-block:: bash salt '*' event.fire_master '{"data":"my event data"}' 'tag' ''' if (__opts__.get('local', None) or __opts__.get('file_client', None) == 'local') and not __opts__.get('use_master_when_local', False): # We can't send an event if we're in masterless mode log.warning('Local mode detected. 
Event with tag %s will NOT be sent.', tag) return False if preload or __opts__.get('__cli') == 'salt-call': # If preload is specified, we must send a raw event (this is # slower because it has to independently authenticate) if 'master_uri' not in __opts__: __opts__['master_uri'] = 'tcp://{ip}:{port}'.format( ip=salt.utils.zeromq.ip_bracket(__opts__['interface']), port=__opts__.get('ret_port', '4506') # TODO, no fallback ) masters = list() ret = None if 'master_uri_list' in __opts__: for master_uri in __opts__['master_uri_list']: masters.append(master_uri) else: masters.append(__opts__['master_uri']) auth = salt.crypt.SAuth(__opts__) load = {'id': __opts__['id'], 'tag': tag, 'data': data, 'tok': auth.gen_token(b'salt'), 'cmd': '_minion_event'} if isinstance(preload, dict): load.update(preload) for master in masters: channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master) try: channel.send(load, timeout=timeout) # channel.send was successful. # Ensure ret is True. ret = True except Exception: # only set a False ret if it hasn't been sent atleast once if ret is None: ret = False finally: channel.close() return ret else: # Usually, we can send the event via the minion, which is faster # because it is already authenticated try: me = salt.utils.event.MinionEvent(__opts__, listen=False, keep_loop=True) return me.fire_event({'data': data, 'tag': tag, 'events': None, 'pretag': None}, 'fire_master') except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False def fire(data, tag, timeout=None): ''' Fire an event on the local minion event bus. Data must be formed as a dict. CLI Example: .. 
code-block:: bash salt '*' event.fire '{"data":"my event data"}' 'tag' ''' if timeout is None: timeout = 60000 else: timeout = timeout * 1000 try: event = salt.utils.event.get_event(__opts__.get('__role', 'minion'), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], opts=__opts__, keep_loop=True, listen=False) return event.fire_event(data, tag, timeout=timeout) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() lines = traceback.format_exception(exc_type, exc_value, exc_traceback) log.debug(lines) return False
saltstack/salt
salt/states/postgres_cluster.py
present
python
def present(version, name, port=None, encoding=None, locale=None, datadir=None, allow_group_access=None, data_checksums=None, wal_segsize=None ): ''' Ensure that the named cluster is present with the specified properties. For more information about all of these options see man pg_createcluster(1) version Version of the postgresql cluster name The name of the cluster port Cluster port encoding The character encoding scheme to be used in this database locale Locale with which to create cluster datadir Where the cluster is stored allow_group_access Allows users in the same group as the cluster owner to read all cluster files created by initdb data_checksums Use checksums on data pages wal_segsize Set the WAL segment size, in megabytes .. versionadded:: 2015.XX ''' msg = 'Cluster {0}/{1} is already present'.format(version, name) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if __salt__['postgres.cluster_exists'](version, name): # check cluster config is correct infos = __salt__['postgres.cluster_list'](verbose=True) info = infos['{0}/{1}'.format(version, name)] # TODO: check locale en encoding configs also if any((port != info['port'] if port else False, datadir != info['datadir'] if datadir else False,)): ret['comment'] = 'Cluster {0}/{1} has wrong parameters ' \ 'which couldn\'t be changed on fly.' \ .format(version, name) ret['result'] = False return ret # The cluster is not present, add it! 
if __opts__.get('test'): ret['result'] = None msg = 'Cluster {0}/{1} is set to be created' ret['comment'] = msg.format(version, name) return ret cluster = __salt__['postgres.cluster_create']( version=version, name=name, port=port, locale=locale, encoding=encoding, datadir=datadir, allow_group_access=allow_group_access, data_checksums=data_checksums, wal_segsize=wal_segsize) if cluster: msg = 'The cluster {0}/{1} has been created' ret['comment'] = msg.format(version, name) ret['changes']['{0}/{1}'.format(version, name)] = 'Present' else: msg = 'Failed to create cluster {0}/{1}' ret['comment'] = msg.format(version, name) ret['result'] = False return ret
Ensure that the named cluster is present with the specified properties. For more information about all of these options see man pg_createcluster(1) version Version of the postgresql cluster name The name of the cluster port Cluster port encoding The character encoding scheme to be used in this database locale Locale with which to create cluster datadir Where the cluster is stored allow_group_access Allows users in the same group as the cluster owner to read all cluster files created by initdb data_checksums Use checksums on data pages wal_segsize Set the WAL segment size, in megabytes .. versionadded:: 2015.XX
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/postgres_cluster.py#L28-L114
null
# -*- coding: utf-8 -*- ''' Management of PostgreSQL clusters ================================= The postgres_cluster state module is used to manage PostgreSQL clusters. Clusters can be set as either absent or present .. code-block:: yaml create cluster 9.3 main: postgres_cluster.present: - name: 'main' - version: '9.3' ''' from __future__ import absolute_import, unicode_literals, print_function def __virtual__(): ''' Only load if the deb_postgres module is present ''' if 'postgres.cluster_exists' not in __salt__: return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.') return True def absent(version, name): ''' Ensure that the named cluster is absent version Version of the postgresql server of the cluster to remove name The name of the cluster to remove .. versionadded:: 2015.XX ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} #check if cluster exists and remove it if __salt__['postgres.cluster_exists'](version, name): if __opts__.get('test'): ret['result'] = None msg = 'Cluster {0}/{1} is set to be removed' ret['comment'] = msg.format(version, name) return ret if __salt__['postgres.cluster_remove'](version, name, True): msg = 'Cluster {0}/{1} has been removed' ret['comment'] = msg.format(version, name) ret['changes'][name] = 'Absent' return ret # fallback ret['comment'] = 'Cluster {0}/{1} is not present, so it cannot ' \ 'be removed'.format(version, name) return ret
saltstack/salt
salt/states/postgres_cluster.py
absent
python
def absent(version, name): ''' Ensure that the named cluster is absent version Version of the postgresql server of the cluster to remove name The name of the cluster to remove .. versionadded:: 2015.XX ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} #check if cluster exists and remove it if __salt__['postgres.cluster_exists'](version, name): if __opts__.get('test'): ret['result'] = None msg = 'Cluster {0}/{1} is set to be removed' ret['comment'] = msg.format(version, name) return ret if __salt__['postgres.cluster_remove'](version, name, True): msg = 'Cluster {0}/{1} has been removed' ret['comment'] = msg.format(version, name) ret['changes'][name] = 'Absent' return ret # fallback ret['comment'] = 'Cluster {0}/{1} is not present, so it cannot ' \ 'be removed'.format(version, name) return ret
Ensure that the named cluster is absent version Version of the postgresql server of the cluster to remove name The name of the cluster to remove .. versionadded:: 2015.XX
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/postgres_cluster.py#L117-L151
null
# -*- coding: utf-8 -*- ''' Management of PostgreSQL clusters ================================= The postgres_cluster state module is used to manage PostgreSQL clusters. Clusters can be set as either absent or present .. code-block:: yaml create cluster 9.3 main: postgres_cluster.present: - name: 'main' - version: '9.3' ''' from __future__ import absolute_import, unicode_literals, print_function def __virtual__(): ''' Only load if the deb_postgres module is present ''' if 'postgres.cluster_exists' not in __salt__: return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.') return True def present(version, name, port=None, encoding=None, locale=None, datadir=None, allow_group_access=None, data_checksums=None, wal_segsize=None ): ''' Ensure that the named cluster is present with the specified properties. For more information about all of these options see man pg_createcluster(1) version Version of the postgresql cluster name The name of the cluster port Cluster port encoding The character encoding scheme to be used in this database locale Locale with which to create cluster datadir Where the cluster is stored allow_group_access Allows users in the same group as the cluster owner to read all cluster files created by initdb data_checksums Use checksums on data pages wal_segsize Set the WAL segment size, in megabytes .. versionadded:: 2015.XX ''' msg = 'Cluster {0}/{1} is already present'.format(version, name) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if __salt__['postgres.cluster_exists'](version, name): # check cluster config is correct infos = __salt__['postgres.cluster_list'](verbose=True) info = infos['{0}/{1}'.format(version, name)] # TODO: check locale en encoding configs also if any((port != info['port'] if port else False, datadir != info['datadir'] if datadir else False,)): ret['comment'] = 'Cluster {0}/{1} has wrong parameters ' \ 'which couldn\'t be changed on fly.' 
\ .format(version, name) ret['result'] = False return ret # The cluster is not present, add it! if __opts__.get('test'): ret['result'] = None msg = 'Cluster {0}/{1} is set to be created' ret['comment'] = msg.format(version, name) return ret cluster = __salt__['postgres.cluster_create']( version=version, name=name, port=port, locale=locale, encoding=encoding, datadir=datadir, allow_group_access=allow_group_access, data_checksums=data_checksums, wal_segsize=wal_segsize) if cluster: msg = 'The cluster {0}/{1} has been created' ret['comment'] = msg.format(version, name) ret['changes']['{0}/{1}'.format(version, name)] = 'Present' else: msg = 'Failed to create cluster {0}/{1}' ret['comment'] = msg.format(version, name) ret['result'] = False return ret
saltstack/salt
salt/returners/splunk.py
_send_splunk
python
def _send_splunk(event, index_override=None, sourcetype_override=None): ''' Send the results to Splunk. Requires the Splunk HTTP Event Collector running on port 8088. This is available on Splunk Enterprise version 6.3 or higher. ''' # Get Splunk Options opts = _get_options() log.info(str('Options: %s'), # future lint: disable=blacklisted-function salt.utils.json.dumps(opts)) http_event_collector_key = opts['token'] http_event_collector_host = opts['indexer'] # Set up the collector splunk_event = http_event_collector(http_event_collector_key, http_event_collector_host) # init the payload payload = {} # Set up the event metadata if index_override is None: payload.update({"index": opts['index']}) else: payload.update({"index": index_override}) if sourcetype_override is None: payload.update({"sourcetype": opts['sourcetype']}) else: payload.update({"index": sourcetype_override}) # Add the event payload.update({"event": event}) log.info(str('Payload: %s'), # future lint: disable=blacklisted-function salt.utils.json.dumps(payload)) # Fire it off splunk_event.sendEvent(payload) return True
Send the results to Splunk. Requires the Splunk HTTP Event Collector running on port 8088. This is available on Splunk Enterprise version 6.3 or higher.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/splunk.py#L70-L104
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _get_options():\n try:\n token = __salt__['config.get']('splunk_http_forwarder:token')\n indexer = __salt__['config.get']('splunk_http_forwarder:indexer')\n sourcetype = __salt__['config.get']('splunk_http_forwarder:sourcetype')\n index = __salt__['config.get']('splunk_http_forwarder:index')\n except Exception:\n log.error(\"Splunk HTTP Forwarder parameters not present in config.\")\n return None\n splunk_opts = {\"token\": token, \"indexer\": indexer, \"sourcetype\": sourcetype, \"index\": index}\n return splunk_opts\n", "def sendEvent(self, payload, eventtime=\"\"):\n # Method to immediately send an event to the http event collector\n\n headers = {'Authorization': 'Splunk ' + self.token}\n\n # If eventtime in epoch not passed as optional argument use current system time in epoch\n if not eventtime:\n eventtime = six.text_type(int(time.time()))\n\n # Fill in local hostname if not manually populated\n if 'host' not in payload:\n payload.update({\"host\": self.host})\n\n # Update time value on payload if need to use 
system time\n data = {\"time\": eventtime}\n data.update(payload)\n\n # send event to http event collector\n r = requests.post(self.server_uri,\n data=salt.utils.json.dumps(data),\n headers=headers,\n verify=http_event_collector_SSL_verify)\n\n # Print debug info if flag set\n if http_event_collector_debug:\n log.debug(r.text)\n log.debug(data)\n" ]
# -*- coding: utf-8 -*- ''' Send json response data to Splunk via the HTTP Event Collector Requires the following config values to be specified in config or pillar: .. code-block:: yaml splunk_http_forwarder: token: <splunk_http_forwarder_token> indexer: <hostname/IP of Splunk indexer> sourcetype: <Destination sourcetype for data> index: <Destination index for data> Run a test by using ``salt-call test.ping --return splunk`` Written by Scott Pack (github.com/scottjpack) ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging import requests import socket import time # Import salt libs import salt.utils.json # Import 3rd-party libs from salt.ext import six _max_content_bytes = 100000 http_event_collector_SSL_verify = False http_event_collector_debug = False log = logging.getLogger(__name__) __virtualname__ = "splunk" def __virtual__(): ''' Return virtual name of the module. :return: The virtual name of the module. ''' return __virtualname__ def returner(ret): ''' Send a message to Splunk via the HTTP Event Collector ''' return _send_splunk(ret) def _get_options(): try: token = __salt__['config.get']('splunk_http_forwarder:token') indexer = __salt__['config.get']('splunk_http_forwarder:indexer') sourcetype = __salt__['config.get']('splunk_http_forwarder:sourcetype') index = __salt__['config.get']('splunk_http_forwarder:index') except Exception: log.error("Splunk HTTP Forwarder parameters not present in config.") return None splunk_opts = {"token": token, "indexer": indexer, "sourcetype": sourcetype, "index": index} return splunk_opts # Thanks to George Starcher for the http_event_collector class (https://github.com/georgestarcher/) class http_event_collector(object): def __init__(self, token, http_event_server, host="", http_event_port='8088', http_event_server_ssl=True, max_bytes=_max_content_bytes): self.token = token self.batchEvents = [] self.maxByteLength = max_bytes self.currentByteLength = 0 # Set host 
to specified value or default to localhostname if no value provided if host: self.host = host else: self.host = socket.gethostname() # Build and set server_uri for http event collector # Defaults to SSL if flag not passed # Defaults to port 8088 if port not passed if http_event_server_ssl: buildURI = ['https://'] else: buildURI = ['http://'] for i in [http_event_server, ':', http_event_port, '/services/collector/event']: buildURI.append(i) self.server_uri = "".join(buildURI) if http_event_collector_debug: log.debug(self.token) log.debug(self.server_uri) def sendEvent(self, payload, eventtime=""): # Method to immediately send an event to the http event collector headers = {'Authorization': 'Splunk ' + self.token} # If eventtime in epoch not passed as optional argument use current system time in epoch if not eventtime: eventtime = six.text_type(int(time.time())) # Fill in local hostname if not manually populated if 'host' not in payload: payload.update({"host": self.host}) # Update time value on payload if need to use system time data = {"time": eventtime} data.update(payload) # send event to http event collector r = requests.post(self.server_uri, data=salt.utils.json.dumps(data), headers=headers, verify=http_event_collector_SSL_verify) # Print debug info if flag set if http_event_collector_debug: log.debug(r.text) log.debug(data) def batchEvent(self, payload, eventtime=""): # Method to store the event in a batch to flush later # Fill in local hostname if not manually populated if 'host' not in payload: payload.update({"host": self.host}) serialized_payload = salt.utils.json.dumps(payload) payloadLength = len(serialized_payload) if (self.currentByteLength + payloadLength) > self.maxByteLength: self.flushBatch() # Print debug info if flag set if http_event_collector_debug: log.debug('auto flushing') else: self.currentByteLength = self.currentByteLength + payloadLength # If eventtime in epoch not passed as optional argument use current system time in epoch if not 
eventtime: eventtime = six.text_type(int(time.time())) # Update time value on payload if need to use system time data = {"time": eventtime} data.update(payload) self.batchEvents.append(serialized_payload) def flushBatch(self): # Method to flush the batch list of events if self.batchEvents: headers = {'Authorization': 'Splunk '+self.token} r = requests.post(self.server_uri, data=" ".join(self.batchEvents), headers=headers, verify=http_event_collector_SSL_verify) self.batchEvents = [] self.currentByteLength = 0
saltstack/salt
salt/output/dson.py
output
python
def output(data, **kwargs): # pylint: disable=unused-argument ''' Print the output data in JSON ''' try: dump_opts = {'indent': 4, 'default': repr} if 'output_indent' in __opts__: indent = __opts__.get('output_indent') sort_keys = False if indent == 'pretty': indent = 4 sort_keys = True elif isinstance(indent, six.integer_types): if indent >= 0: indent = indent else: indent = None dump_opts['indent'] = indent dump_opts['sort_keys'] = sort_keys return dson.dumps(data, **dump_opts) except UnicodeDecodeError as exc: log.error('Unable to serialize output to dson') return dson.dumps( {'error': 'Unable to serialize output to DSON', 'message': six.text_type(exc)} ) except TypeError: log.debug('An error occurred while outputting DSON', exc_info=True) # Return valid JSON for unserializable objects return dson.dumps({})
Print the output data in JSON
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/dson.py#L37-L74
null
# -*- coding: utf-8 -*- ''' Display return data in DSON format ================================== This outputter is intended for demonstration purposes. Information on the DSON spec can be found `here`__. .. __: http://vpzomtrrfrt.github.io/DSON/ This outputter requires `Dogeon`__ (installable via pip) .. __: https://github.com/soasme/dogeon ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging # Import 3rd-party libs try: import dson except ImportError: dson = None from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): if dson is None: return (False, 'The dogeon Python package is not installed') return True
saltstack/salt
salt/states/serverdensity_device.py
_get_salt_params
python
def _get_salt_params():
    '''
    Collect Server Density device parameters from this minion's grains and
    :mod:`status.all_status <salt.modules.status.all_status>` output.

    NOTE: publicDNS and publicIPs are not collected here; salt-cloud might
    offer a way to obtain them.
    '''
    stats = __salt__['status.all_status']()
    grains = __salt__['grains.items']()
    info = {}
    try:
        info['name'] = grains['id']
        info['hostname'] = grains['host']
        if grains['kernel'] == 'Darwin':
            os_spec = {'code': 'mac', 'name': 'Mac'}
        else:
            os_spec = {'code': grains['kernel'].lower(),
                       'name': grains['kernel']}
        info['os'] = salt.utils.json.dumps(os_spec)
        info['cpuCores'] = stats['cpuinfo']['cpu cores']
        info['installedRAM'] = six.text_type(int(stats['meminfo']['MemTotal']['value']) / 1024)
        info['swapSpace'] = six.text_type(int(stats['meminfo']['SwapTotal']['value']) / 1024)
        info['privateIPs'] = salt.utils.json.dumps(grains['fqdn_ip4'])
        info['privateDNS'] = salt.utils.json.dumps(grains['fqdn'])
    except KeyError:
        # A missing grain or stat deliberately leaves the dict partially
        # filled with whatever was gathered before the failure.
        pass

    return info
Try to get all sort of parameters for Server Density server info. NOTE: Missing publicDNS and publicIPs parameters. There might be way of getting them with salt-cloud.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/serverdensity_device.py#L69-L95
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Monitor Server with Server Density ================================== .. versionadded:: 2014.7.0 `Server Density <https://www.serverdensity.com/>`_ Is a hosted monitoring service. .. warning:: This state module is beta. It might be changed later to include more or less automation. .. note:: This state module requires a pillar for authentication with Server Density To install a v1 agent: .. code-block:: yaml serverdensity: api_token: "b97da80a41c4f61bff05975ee51eb1aa" account_url: "https://your-account.serverdensity.io" To install a v2 agent: .. code-block:: yaml serverdensity: api_token: "b97da80a41c4f61bff05975ee51eb1aa" account_name: "your-account" .. note:: Although Server Density allows duplicate device names in its database, this module will raise an exception if you try monitoring devices with the same name. Example: .. code-block:: yaml 'server_name': serverdensity_device.monitored ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging # Import Salt libs import salt.utils.json # Import 3rd-party libs from salt.ext import six # TODO: # # Add a plugin support # Add notification support log = logging.getLogger(__name__) def monitored(name, group=None, salt_name=True, salt_params=True, agent_version=1, **params): ''' Device is monitored with Server Density. name Device name in Server Density. salt_name If ``True`` (default), takes the name from the ``id`` grain. If ``False``, the provided name is used. group Group name under with device will appear in Server Density dashboard. Default - `None`. agent_version The agent version you want to use. Valid values are 1 or 2. Default - 1. salt_params If ``True`` (default), needed config parameters will be sourced from grains and from :mod:`status.all_status <salt.modules.status.all_status>`. params Add parameters that you want to appear in the Server Density dashboard. Will overwrite the `salt_params` parameters. 
For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating Usage example: .. code-block:: yaml 'server_name': serverdensity_device.monitored .. code-block:: yaml 'server_name': serverdensity_device.monitored: - group: web-servers .. code-block:: yaml 'my_special_server': serverdensity_device.monitored: - salt_name: False - group: web-servers - cpuCores: 2 - os: '{"code": "linux", "name": "Linux"}' ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} params_from_salt = _get_salt_params() if salt_name: name = params_from_salt.pop('name') ret['name'] = name else: params_from_salt.pop('name') if group: params['group'] = group if agent_version != 2: # Anything different from 2 will fallback into the v1. agent_version = 1 # override salt_params with given params if salt_params: for key, value in six.iteritems(params): params_from_salt[key] = value params_to_use = params_from_salt else: params_to_use = params device_in_sd = True if __salt__['serverdensity_device.ls'](name=name) else False sd_agent_installed = True if 'sd-agent' in __salt__['pkg.list_pkgs']() else False if device_in_sd and sd_agent_installed: ret['result'] = True ret['comment'] = 'Such server name already exists in this Server Density account. And sd-agent is installed' ret['changes'] = {} return ret if __opts__['test']: if not device_in_sd or not sd_agent_installed: ret['result'] = None ret['comment'] = 'Server Density agent is set to be installed and/or device created in the Server Density DB' return ret else: ret['result'] = None ret['comment'] = 'Server Density agent is already installed, or device already exists' return ret elif device_in_sd: device = __salt__['serverdensity_device.ls'](name=name)[0] agent_key = device['agentKey'] ret['comment'] = 'Device was already in Server Density db.' 
if not device_in_sd: device = __salt__['serverdensity_device.create'](name, **params_from_salt) agent_key = device['agentKey'] ret['comment'] = 'Device created in Server Density db.' ret['changes'] = {'device_created': device} else: ret['result'] = False ret['comment'] = 'Failed to create device in Server Density DB and this device does not exist in db either.' ret['changes'] = {} installed_agent = __salt__['serverdensity_device.install_agent'](agent_key, agent_version) ret['result'] = True ret['comment'] = 'Successfully installed agent and created device in Server Density db.' ret['changes'] = {'created_device': device, 'installed_agent': installed_agent} return ret
saltstack/salt
salt/states/serverdensity_device.py
monitored
python
def monitored(name, group=None, salt_name=True, salt_params=True, agent_version=1, **params):
    '''
    Device is monitored with Server Density.

    name
        Device name in Server Density.

    salt_name
        If ``True`` (default), takes the name from the ``id`` grain. If
        ``False``, the provided name is used.

    group
        Group name under which the device will appear in the Server Density
        dashboard. Default - `None`.

    agent_version
        The agent version you want to use. Valid values are 1 or 2.
        Default - 1.

    salt_params
        If ``True`` (default), needed config parameters will be sourced from
        grains and from :mod:`status.all_status
        <salt.modules.status.all_status>`.

    params
        Add parameters that you want to appear in the Server Density
        dashboard. Will overwrite the `salt_params` parameters. For more info,
        see the `API docs`__.

    .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating

    Usage example:

    .. code-block:: yaml

        'server_name':
          serverdensity_device.monitored

    .. code-block:: yaml

        'server_name':
          serverdensity_device.monitored:
            - group: web-servers

    .. code-block:: yaml

        'my_special_server':
          serverdensity_device.monitored:
            - salt_name: False
            - group: web-servers
            - cpuCores: 2
            - os: '{"code": "linux", "name": "Linux"}'
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    params_from_salt = _get_salt_params()
    if salt_name:
        # Prefer the minion id over the state declaration name.
        name = params_from_salt.pop('name')
        ret['name'] = name
    else:
        params_from_salt.pop('name')

    if group:
        params['group'] = group

    if agent_version != 2:
        # Anything different from 2 will fallback into the v1.
        agent_version = 1

    # Explicit params take precedence over the ones sourced from Salt.
    if salt_params:
        for key, value in six.iteritems(params):
            params_from_salt[key] = value
        params_to_use = params_from_salt
    else:
        params_to_use = params

    device_in_sd = True if __salt__['serverdensity_device.ls'](name=name) else False
    sd_agent_installed = True if 'sd-agent' in __salt__['pkg.list_pkgs']() else False

    if device_in_sd and sd_agent_installed:
        ret['result'] = True
        ret['comment'] = 'Such server name already exists in this Server Density account. And sd-agent is installed'
        ret['changes'] = {}
        return ret

    if __opts__['test']:
        if not device_in_sd or not sd_agent_installed:
            ret['result'] = None
            ret['comment'] = 'Server Density agent is set to be installed and/or device created in the Server Density DB'
            return ret
        else:
            ret['result'] = None
            ret['comment'] = 'Server Density agent is already installed, or device already exists'
            return ret
    elif device_in_sd:
        device = __salt__['serverdensity_device.ls'](name=name)[0]
        agent_key = device['agentKey']
        ret['comment'] = 'Device was already in Server Density db.'

    if not device_in_sd:
        # BUGFIX: pass the merged ``params_to_use`` (honors explicit params
        # when salt_params=False) instead of the raw salt-sourced dict.
        device = __salt__['serverdensity_device.create'](name, **params_to_use)
        agent_key = device['agentKey']
        ret['comment'] = 'Device created in Server Density db.'
        ret['changes'] = {'device_created': device}
    else:
        # NOTE(review): these assignments are overwritten by the success
        # block below, so this branch never surfaces a failure — confirm
        # whether an early return was intended here.
        ret['result'] = False
        ret['comment'] = 'Failed to create device in Server Density DB and this device does not exist in db either.'
        ret['changes'] = {}

    installed_agent = __salt__['serverdensity_device.install_agent'](agent_key, agent_version)

    ret['result'] = True
    ret['comment'] = 'Successfully installed agent and created device in Server Density db.'
    ret['changes'] = {'created_device': device, 'installed_agent': installed_agent}
    return ret
Device is monitored with Server Density. name Device name in Server Density. salt_name If ``True`` (default), takes the name from the ``id`` grain. If ``False``, the provided name is used. group Group name under with device will appear in Server Density dashboard. Default - `None`. agent_version The agent version you want to use. Valid values are 1 or 2. Default - 1. salt_params If ``True`` (default), needed config parameters will be sourced from grains and from :mod:`status.all_status <salt.modules.status.all_status>`. params Add parameters that you want to appear in the Server Density dashboard. Will overwrite the `salt_params` parameters. For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating Usage example: .. code-block:: yaml 'server_name': serverdensity_device.monitored .. code-block:: yaml 'server_name': serverdensity_device.monitored: - group: web-servers .. code-block:: yaml 'my_special_server': serverdensity_device.monitored: - salt_name: False - group: web-servers - cpuCores: 2 - os: '{"code": "linux", "name": "Linux"}'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/serverdensity_device.py#L98-L215
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _get_salt_params():\n '''\n Try to get all sort of parameters for Server Density server info.\n\n NOTE: Missing publicDNS and publicIPs parameters. There might be way of\n getting them with salt-cloud.\n '''\n all_stats = __salt__['status.all_status']()\n all_grains = __salt__['grains.items']()\n params = {}\n try:\n params['name'] = all_grains['id']\n params['hostname'] = all_grains['host']\n if all_grains['kernel'] == 'Darwin':\n sd_os = {'code': 'mac', 'name': 'Mac'}\n else:\n sd_os = {'code': all_grains['kernel'].lower(), 'name': all_grains['kernel']}\n params['os'] = salt.utils.json.dumps(sd_os)\n params['cpuCores'] = all_stats['cpuinfo']['cpu cores']\n params['installedRAM'] = six.text_type(int(all_stats['meminfo']['MemTotal']['value']) / 1024)\n params['swapSpace'] = six.text_type(int(all_stats['meminfo']['SwapTotal']['value']) / 1024)\n params['privateIPs'] = salt.utils.json.dumps(all_grains['fqdn_ip4'])\n params['privateDNS'] = salt.utils.json.dumps(all_grains['fqdn'])\n except KeyError:\n pass\n\n return params\n" ]
# -*- coding: utf-8 -*- ''' Monitor Server with Server Density ================================== .. versionadded:: 2014.7.0 `Server Density <https://www.serverdensity.com/>`_ Is a hosted monitoring service. .. warning:: This state module is beta. It might be changed later to include more or less automation. .. note:: This state module requires a pillar for authentication with Server Density To install a v1 agent: .. code-block:: yaml serverdensity: api_token: "b97da80a41c4f61bff05975ee51eb1aa" account_url: "https://your-account.serverdensity.io" To install a v2 agent: .. code-block:: yaml serverdensity: api_token: "b97da80a41c4f61bff05975ee51eb1aa" account_name: "your-account" .. note:: Although Server Density allows duplicate device names in its database, this module will raise an exception if you try monitoring devices with the same name. Example: .. code-block:: yaml 'server_name': serverdensity_device.monitored ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging # Import Salt libs import salt.utils.json # Import 3rd-party libs from salt.ext import six # TODO: # # Add a plugin support # Add notification support log = logging.getLogger(__name__) def _get_salt_params(): ''' Try to get all sort of parameters for Server Density server info. NOTE: Missing publicDNS and publicIPs parameters. There might be way of getting them with salt-cloud. 
''' all_stats = __salt__['status.all_status']() all_grains = __salt__['grains.items']() params = {} try: params['name'] = all_grains['id'] params['hostname'] = all_grains['host'] if all_grains['kernel'] == 'Darwin': sd_os = {'code': 'mac', 'name': 'Mac'} else: sd_os = {'code': all_grains['kernel'].lower(), 'name': all_grains['kernel']} params['os'] = salt.utils.json.dumps(sd_os) params['cpuCores'] = all_stats['cpuinfo']['cpu cores'] params['installedRAM'] = six.text_type(int(all_stats['meminfo']['MemTotal']['value']) / 1024) params['swapSpace'] = six.text_type(int(all_stats['meminfo']['SwapTotal']['value']) / 1024) params['privateIPs'] = salt.utils.json.dumps(all_grains['fqdn_ip4']) params['privateDNS'] = salt.utils.json.dumps(all_grains['fqdn']) except KeyError: pass return params
saltstack/salt
salt/queues/sqlite_queue.py
_conn
python
def _conn(queue):
    '''
    Open an sqlite connection to the database backing ``queue``, creating
    the queue's table on first use.
    '''
    db_path = os.path.join(__opts__['sqlite_queue_dir'],
                           '{0}.db'.format(queue))
    log.debug('Connecting to: %s', db_path)

    connection = sqlite3.connect(db_path)
    if queue not in _list_tables(connection):
        _create_table(connection, queue)
    return connection
Return an sqlite connection
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L40-L52
null
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = __opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues def list_items(queue): 
''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
_list_items
python
def _list_items(queue):
    '''
    Private helper: fetch every row of ``queue`` as a list of 1-tuples
    containing the item name.
    '''
    connection = _conn(queue)
    query = 'SELECT name FROM {0}'.format(queue)
    with connection:
        log.debug('SQL Query: %s', query)
        cursor = connection.cursor()
        cursor.execute(query)
        return cursor.fetchall()
Private function to list contents of a queue
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L75-L86
[ "def _conn(queue):\n '''\n Return an sqlite connection\n '''\n queue_dir = __opts__['sqlite_queue_dir']\n db = os.path.join(queue_dir, '{0}.db'.format(queue))\n log.debug('Connecting to: %s', db)\n\n con = sqlite3.connect(db)\n tables = _list_tables(con)\n if queue not in tables:\n _create_table(con, queue)\n return con\n" ]
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = __opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() 
return queues def list_items(queue): ''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
_list_queues
python
def _list_queues():
    '''
    Return queue names, one per ``*.db`` file in the configured
    ``sqlite_queue_dir``.
    '''
    pattern = os.path.join(__opts__['sqlite_queue_dir'], '*.db')
    names = []
    for path in glob.glob(pattern):
        base = os.path.basename(path)
        names.append(os.path.splitext(base)[0])
    return names
Return a list of sqlite databases in the queue_dir
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L89-L98
null
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues 
def list_items(queue): ''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
list_items
python
def list_items(queue):
    '''
    List contents of a queue

    Returns the ``name`` column of every row in the queue's table as a
    flat list of strings.
    '''
    # _list_items returns sqlite rows as one-element tuples; flatten them.
    rows = _list_items(queue)
    return [row[0] for row in rows]
List contents of a queue
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L109-L115
[ "def _list_items(queue):\n '''\n Private function to list contents of a queue\n '''\n con = _conn(queue)\n with con:\n cur = con.cursor()\n cmd = 'SELECT name FROM {0}'.format(queue)\n log.debug('SQL Query: %s', cmd)\n cur.execute(cmd)\n contents = cur.fetchall()\n return contents\n" ]
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = 
__opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
_quote_escape
python
def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item)
Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes ''
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L126-L134
null
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = 
__opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues def list_items(queue): ''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
delete
python
def delete(queue, items):
    '''
    Delete an item or items from a queue.

    queue -- name of the queue (doubles as the sqlite table name)
    items -- a single item (str), a list of items, or a dict; dicts are
             serialized the same way ``insert`` stores them so the stored
             row can be matched.

    Returns True in all cases (sqlite errors propagate as exceptions).
    '''
    con = _conn(queue)
    with con:
        cur = con.cursor()
        if isinstance(items, six.string_types):
            # NOTE(review): the SQL is built by string formatting; only
            # single quotes are escaped by _quote_escape, so queue/item
            # values are trusted input here.
            items = _quote_escape(items)
            cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items)
            log.debug('SQL Query: %s', cmd)
            cur.execute(cmd)
            return True
        if isinstance(items, list):
            # The list branch uses a parameterized query, so escaping is
            # redundant here but kept for symmetry with the other branches.
            items = [_quote_escape(el) for el in items]
            cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue)
            log.debug('SQL Query: %s', cmd)
            newitems = []
            for item in items:
                newitems.append((item,))  # we need a list of one item tuples here
            cur.executemany(cmd, newitems)
        if isinstance(items, dict):
            # Mirror insert(): dicts are stored as JSON with double quotes
            # swapped for single quotes, so reproduce that exact form to
            # match the stored row.
            items = salt.utils.json.dumps(items).replace('"', "'")
            items = _quote_escape(items)
            cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items)  # future lint: disable=blacklisted-function
            log.debug('SQL Query: %s', cmd)
            cur.execute(cmd)
            return True
        return True
Delete an item or items from a queue
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L179-L208
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _conn(queue):\n '''\n Return an sqlite connection\n '''\n queue_dir = __opts__['sqlite_queue_dir']\n db = os.path.join(queue_dir, '{0}.db'.format(queue))\n log.debug('Connecting to: %s', db)\n\n con = sqlite3.connect(db)\n tables = _list_tables(con)\n if queue not in tables:\n _create_table(con, queue)\n return con\n", "def _quote_escape(item):\n '''\n Make sure single quotes are escaped properly in sqlite3 fashion.\n e.g.: ' becomes ''\n '''\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)\n" ]
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = 
__opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues def list_items(queue): ''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def pop(queue, quantity=1, is_runner=False): ''' Pop one or more or all items from the queue return them. ''' cmd = 'SELECT name FROM {0}'.format(queue) if quantity != 'all': try: quantity = int(quantity) except ValueError as exc: error_txt = ('Quantity must be an integer or "all".\n' 'Error: "{0}".'.format(exc)) raise SaltInvocationError(error_txt) cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)]) log.debug('SQL Query: %s', cmd) con = _conn(queue) items = [] with con: cur = con.cursor() result = cur.execute(cmd).fetchall() if result: items = [item[0] for item in result] itemlist = '","'.join(items) _quote_escape(itemlist) del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format( queue, itemlist) log.debug('SQL Query: %s', del_cmd) cur.execute(del_cmd) con.commit() if is_runner: items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result] log.info(items) return items
saltstack/salt
salt/queues/sqlite_queue.py
pop
python
def pop(queue, quantity=1, is_runner=False):
    '''
    Pop one or more or all items from the queue return them.

    queue     -- name of the queue (doubles as the sqlite table name)
    quantity  -- an integer count, or the string 'all' to drain the queue
    is_runner -- when True, popped items are JSON-decoded before returning
                 (dicts are stored with single quotes; see insert())

    Raises SaltInvocationError if quantity is neither an int nor 'all'.
    '''
    cmd = 'SELECT name FROM {0}'.format(queue)
    if quantity != 'all':
        try:
            quantity = int(quantity)
        except ValueError as exc:
            error_txt = ('Quantity must be an integer or "all".\n'
                         'Error: "{0}".'.format(exc))
            raise SaltInvocationError(error_txt)
        cmd = ''.join([cmd, ' LIMIT {0}'.format(quantity)])
    log.debug('SQL Query: %s', cmd)
    con = _conn(queue)
    items = []
    with con:
        cur = con.cursor()
        result = cur.execute(cmd).fetchall()
        if result:
            items = [item[0] for item in result]
            itemlist = '","'.join(items)
            # Bug fix: the escaped string was previously computed and
            # discarded; assign it so the DELETE uses the escaped names.
            itemlist = _quote_escape(itemlist)
            del_cmd = '''DELETE FROM {0} WHERE name IN ("{1}")'''.format(
                queue, itemlist)
            log.debug('SQL Query: %s', del_cmd)
            cur.execute(del_cmd)
        con.commit()
    if is_runner:
        # Stored dicts use single quotes (see insert()); swap back before
        # decoding so json.loads accepts them.
        items = [salt.utils.json.loads(item[0].replace("'", '"')) for item in result]
    log.info(items)
    return items
Pop one or more or all items from the queue return them.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/queues/sqlite_queue.py#L211-L244
[ "def _conn(queue):\n '''\n Return an sqlite connection\n '''\n queue_dir = __opts__['sqlite_queue_dir']\n db = os.path.join(queue_dir, '{0}.db'.format(queue))\n log.debug('Connecting to: %s', db)\n\n con = sqlite3.connect(db)\n tables = _list_tables(con)\n if queue not in tables:\n _create_table(con, queue)\n return con\n", "def _quote_escape(item):\n '''\n Make sure single quotes are escaped properly in sqlite3 fashion.\n e.g.: ' becomes ''\n '''\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)\n" ]
# -*- coding: utf-8 -*- ''' .. versionadded:: 2014.7.0 This is the default local master event queue built on sqlite. By default, an sqlite3 database file is created in the `sqlite_queue_dir` which is found at:: /var/cache/salt/master/queues It's possible to store the sqlite3 database files by setting `sqlite_queue_dir` to another location:: sqlite_queue_dir: /home/myuser/salt/master/queues ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import glob import logging import os import re import sqlite3 import salt.utils.json from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'sqlite' def __virtual__(): # All python servers should have sqlite3 and so be able to use # this default sqlite queue system return __virtualname__ def _conn(queue): ''' Return an sqlite connection ''' queue_dir = __opts__['sqlite_queue_dir'] db = os.path.join(queue_dir, '{0}.db'.format(queue)) log.debug('Connecting to: %s', db) con = sqlite3.connect(db) tables = _list_tables(con) if queue not in tables: _create_table(con, queue) return con def _list_tables(con): with con: cur = con.cursor() cmd = 'SELECT name FROM sqlite_master WHERE type = "table"' log.debug('SQL Query: %s', cmd) cur.execute(cmd) result = cur.fetchall() return [x[0] for x in result] def _create_table(con, queue): with con: cur = con.cursor() cmd = 'CREATE TABLE {0}(id INTEGER PRIMARY KEY, '\ 'name TEXT UNIQUE)'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True def _list_items(queue): ''' Private function to list contents of a queue ''' con = _conn(queue) with con: cur = con.cursor() cmd = 'SELECT name FROM {0}'.format(queue) log.debug('SQL Query: %s', cmd) cur.execute(cmd) contents = cur.fetchall() return contents def _list_queues(): ''' Return a list of sqlite databases in the queue_dir ''' queue_dir = 
__opts__['sqlite_queue_dir'] files = os.path.join(queue_dir, '*.db') paths = glob.glob(files) queues = [os.path.splitext(os.path.basename(item))[0] for item in paths] return queues def list_queues(): ''' Return a list of Salt Queues on the Salt Master ''' queues = _list_queues() return queues def list_items(queue): ''' List contents of a queue ''' itemstuple = _list_items(queue) items = [item[0] for item in itemstuple] return items def list_length(queue): ''' Provide the number of items in a queue ''' items = _list_items(queue) return len(items) def _quote_escape(item): ''' Make sure single quotes are escaped properly in sqlite3 fashion. e.g.: ' becomes '' ''' rex_sqlquote = re.compile("'", re.M) return rex_sqlquote.sub("''", item) def insert(queue, items): ''' Add an item or items to a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = '''INSERT INTO {0}(name) VALUES('{1}')'''.format(queue, items) log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = "INSERT INTO {0}(name) VALUES(?)".format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here try: cur.executemany(cmd, newitems) except sqlite3.IntegrityError as esc: return('One or more items already exists in this queue. ' 'sqlite error: {0}'.format(esc)) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = str('''INSERT INTO {0}(name) VALUES('{1}')''').format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) try: cur.execute(cmd) except sqlite3.IntegrityError as esc: return('Item already exists in this queue. 
' 'sqlite error: {0}'.format(esc)) return True def delete(queue, items): ''' Delete an item or items from a queue ''' con = _conn(queue) with con: cur = con.cursor() if isinstance(items, six.string_types): items = _quote_escape(items) cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items) log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True if isinstance(items, list): items = [_quote_escape(el) for el in items] cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue) log.debug('SQL Query: %s', cmd) newitems = [] for item in items: newitems.append((item,)) # we need a list of one item tuples here cur.executemany(cmd, newitems) if isinstance(items, dict): items = salt.utils.json.dumps(items).replace('"', "'") items = _quote_escape(items) cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items) # future lint: disable=blacklisted-function log.debug('SQL Query: %s', cmd) cur.execute(cmd) return True return True
saltstack/salt
salt/modules/boto_sns.py
get_all_topics
python
def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key]
Returns a list of the all topics.. CLI example:: salt myminion boto_sns.get_all_topics
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L78-L99
[ "def _cache_get_key():\n return 'boto_sns.topics_cache'\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. 
CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. 
CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
exists
python
def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys())
Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L102-L115
[ "def get_all_topics(region=None, key=None, keyid=None, profile=None):\n '''\n Returns a list of the all topics..\n\n CLI example::\n\n salt myminion boto_sns.get_all_topics\n '''\n cache_key = _cache_get_key()\n try:\n return __context__[cache_key]\n except KeyError:\n pass\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n __context__[cache_key] = {}\n # TODO: support >100 SNS topics (via NextToken)\n topics = conn.get_all_topics()\n for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']:\n short_name = t['TopicArn'].split(':')[-1]\n __context__[cache_key][short_name] = t['TopicArn']\n return __context__[cache_key]\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
create
python
def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True
Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L118-L130
[ "def _invalidate_cache():\n try:\n del __context__[_cache_get_key()]\n except KeyError:\n pass\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
delete
python
def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True
Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L133-L145
[ "def _invalidate_cache():\n try:\n del __context__[_cache_get_key()]\n except KeyError:\n pass\n", "def get_arn(name, region=None, key=None, keyid=None, profile=None):\n '''\n Returns the full ARN for a given topic name.\n\n CLI example::\n\n salt myminion boto_sns.get_arn mytopic\n '''\n if name.startswith('arn:aws:sns:'):\n return name\n\n account_id = __salt__['boto_iam.get_account_id'](\n region=region, key=key, keyid=keyid, profile=profile\n )\n return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),\n account_id, name)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
get_all_subscriptions_by_topic
python
def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None):
    '''
    Get list of all subscriptions to a specific topic.

    CLI example::

        salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
    '''
    # Serve from the per-invocation cache (__context__) when a previous call
    # already fetched this topic's subscriptions.
    cache_key = _subscriptions_cache_key(name)
    try:
        return __context__[cache_key]
    except KeyError:
        pass
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # get_arn accepts either a short topic name or a full ARN.
    ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile))
    # Unwrap boto's nested response envelope down to the subscription list
    # before caching it.
    __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']
    return __context__[cache_key]
Get list of all subscriptions to a specific topic. CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L148-L165
[ "def get_arn(name, region=None, key=None, keyid=None, profile=None):\n '''\n Returns the full ARN for a given topic name.\n\n CLI example::\n\n salt myminion boto_sns.get_arn mytopic\n '''\n if name.startswith('arn:aws:sns:'):\n return name\n\n account_id = __salt__['boto_iam.get_account_id'](\n region=region, key=key, keyid=keyid, profile=profile\n )\n return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),\n account_id, name)\n", "def _subscriptions_cache_key(name):\n return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. 
CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
subscribe
python
def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None):
    '''
    Subscribe to a Topic.

    CLI example::

        salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # get_arn accepts either a short topic name or a full ARN.
    conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint)
    log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic)
    # Drop the cached subscription list for this topic so the next
    # get_all_subscriptions_by_topic call sees the new subscription.
    try:
        del __context__[_subscriptions_cache_key(topic)]
    except KeyError:
        pass
    return True
Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L168-L183
[ "def get_arn(name, region=None, key=None, keyid=None, profile=None):\n '''\n Returns the full ARN for a given topic name.\n\n CLI example::\n\n salt myminion boto_sns.get_arn mytopic\n '''\n if name.startswith('arn:aws:sns:'):\n return name\n\n account_id = __salt__['boto_iam.get_account_id'](\n region=region, key=key, keyid=keyid, profile=profile\n )\n return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile),\n account_id, name)\n", "def _subscriptions_cache_key(name):\n return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
unsubscribe
python
def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None):
    '''
    Unsubscribe a specific SubscriptionArn of a topic.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1

    .. versionadded:: 2016.11.0
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    # Only a full SNS subscription ARN is accepted here (short names are not
    # resolvable for subscriptions).
    if not subscription_arn.startswith('arn:aws:sns:'):
        return False

    try:
        conn.unsubscribe(subscription_arn)
        log.info('Unsubscribe %s to %s topic', subscription_arn, topic)
    except Exception:  # pylint: disable=broad-except
        # Best-effort: report the failure with a traceback instead of raising.
        log.error('Unsubscribe Error', exc_info=True)
        return False
    else:
        # Invalidate the cached subscription list for this topic.
        __context__.pop(_subscriptions_cache_key(topic), None)
        return True
Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. versionadded:: 2016.11.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L186-L211
[ "def _subscriptions_cache_key(name):\n return '{0}_{1}_subscriptions'.format(_cache_get_key(), name)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def get_arn(name, region=None, key=None, keyid=None, profile=None): ''' Returns the full ARN for a given topic name. 
CLI example:: salt myminion boto_sns.get_arn mytopic ''' if name.startswith('arn:aws:sns:'): return name account_id = __salt__['boto_iam.get_account_id']( region=region, key=key, keyid=keyid, profile=profile ) return 'arn:aws:sns:{0}:{1}:{2}'.format(_get_region(region, profile), account_id, name) def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/modules/boto_sns.py
get_arn
python
def get_arn(name, region=None, key=None, keyid=None, profile=None):
    '''
    Returns the full ARN for a given topic name.

    CLI example::

        salt myminion boto_sns.get_arn mytopic
    '''
    # Guard clause: the caller already supplied a full ARN, pass it through.
    if name.startswith('arn:aws:sns:'):
        return name

    resolved_region = _get_region(region, profile)
    account_id = __salt__['boto_iam.get_account_id'](
        region=region, key=key, keyid=keyid, profile=profile
    )
    return 'arn:aws:sns:{0}:{1}:{2}'.format(resolved_region, account_id, name)
Returns the full ARN for a given topic name. CLI example:: salt myminion boto_sns.get_arn mytopic
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_sns.py#L214-L229
[ "def _get_region(region=None, profile=None):\n if profile and 'region' in profile:\n return profile['region']\n if not region and __salt__['config.option'](profile):\n _profile = __salt__['config.option'](profile)\n region = _profile.get('region', None)\n if not region and __salt__['config.option']('sns.region'):\n region = __salt__['config.option']('sns.region')\n if not region:\n region = 'us-east-1'\n return region\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon SNS :configuration: This module accepts explicit sns credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml sns.keyid: GKTADJGHEIQSXMKKRBJ08H sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml sns.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: #pylint: disable=unused-import import boto import boto.sns #pylint: enable=unused-import logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. ''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'sns', pack=__salt__) return has_boto_reqs def get_all_topics(region=None, key=None, keyid=None, profile=None): ''' Returns a list of the all topics.. 
CLI example:: salt myminion boto_sns.get_all_topics ''' cache_key = _cache_get_key() try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) __context__[cache_key] = {} # TODO: support >100 SNS topics (via NextToken) topics = conn.get_all_topics() for t in topics['ListTopicsResponse']['ListTopicsResult']['Topics']: short_name = t['TopicArn'].split(':')[-1] __context__[cache_key][short_name] = t['TopicArn'] return __context__[cache_key] def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1 ''' topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys()) def create(name, region=None, key=None, keyid=None, profile=None): ''' Create an SNS topic. CLI example to create a topic:: salt myminion boto_sns.create mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.create_topic(name) log.info('Created SNS topic %s', name) _invalidate_cache() return True def delete(name, region=None, key=None, keyid=None, profile=None): ''' Delete an SNS topic. CLI example to delete a topic:: salt myminion boto_sns.delete mytopic region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_topic(get_arn(name, region, key, keyid, profile)) log.info('Deleted SNS topic %s', name) _invalidate_cache() return True def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None): ''' Get list of all subscriptions to a specific topic. 
CLI example to delete a topic:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1 ''' cache_key = _subscriptions_cache_key(name) try: return __context__[cache_key] except KeyError: pass conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile)) __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions'] return __context__[cache_key] def subscribe(topic, protocol, endpoint, region=None, key=None, keyid=None, profile=None): ''' Subscribe to a Topic. CLI example to delete a topic:: salt myminion boto_sns.subscribe mytopic https https://www.example.com/sns-endpoint region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.subscribe(get_arn(topic, region, key, keyid, profile), protocol, endpoint) log.info('Subscribe %s %s to %s topic', protocol, endpoint, topic) try: del __context__[_subscriptions_cache_key(topic)] except KeyError: pass return True def unsubscribe(topic, subscription_arn, region=None, key=None, keyid=None, profile=None): ''' Unsubscribe a specific SubscriptionArn of a topic. CLI Example: .. code-block:: bash salt myminion boto_sns.unsubscribe my_topic my_subscription_arn region=us-east-1 .. 
versionadded:: 2016.11.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if subscription_arn.startswith('arn:aws:sns:') is False: return False try: conn.unsubscribe(subscription_arn) log.info('Unsubscribe %s to %s topic', subscription_arn, topic) except Exception as e: log.error('Unsubscribe Error', exc_info=True) return False else: __context__.pop(_subscriptions_cache_key(topic), None) return True def _get_region(region=None, profile=None): if profile and 'region' in profile: return profile['region'] if not region and __salt__['config.option'](profile): _profile = __salt__['config.option'](profile) region = _profile.get('region', None) if not region and __salt__['config.option']('sns.region'): region = __salt__['config.option']('sns.region') if not region: region = 'us-east-1' return region def _subscriptions_cache_key(name): return '{0}_{1}_subscriptions'.format(_cache_get_key(), name) def _invalidate_cache(): try: del __context__[_cache_get_key()] except KeyError: pass def _cache_get_key(): return 'boto_sns.topics_cache'
saltstack/salt
salt/utils/asynchronous.py
current_ioloop
python
def current_ioloop(io_loop):
    '''
    A context manager that will set the current ioloop to io_loop for the context
    '''
    # Capture whichever loop is current *before* switching so it can be
    # restored afterwards.
    orig_loop = tornado.ioloop.IOLoop.current()
    io_loop.make_current()
    try:
        yield
    finally:
        # Always restore the original loop, even if the body raised.
        orig_loop.make_current()
A context manager that will set the current ioloop to io_loop for the context
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/asynchronous.py#L15-L24
null
# -*- coding: utf-8 -*- ''' Helpers/utils for working with tornado asynchronous stuff ''' from __future__ import absolute_import, print_function, unicode_literals import tornado.ioloop import tornado.concurrent import contextlib from salt.utils import zeromq @contextlib.contextmanager class SyncWrapper(object): ''' A wrapper to make Async classes synchronous This is uses as a simple wrapper, for example: asynchronous = AsyncClass() # this method would reguarly return a future future = asynchronous.async_method() sync = SyncWrapper(async_factory_method, (arg1, arg2), {'kwarg1': 'val'}) # the sync wrapper will automatically wait on the future ret = sync.async_method() ''' def __init__(self, method, args=tuple(), kwargs=None): if kwargs is None: kwargs = {} self.io_loop = zeromq.ZMQDefaultLoop() kwargs['io_loop'] = self.io_loop with current_ioloop(self.io_loop): self.asynchronous = method(*args, **kwargs) def __getattribute__(self, key): try: return object.__getattribute__(self, key) except AttributeError as ex: if key == 'asynchronous': raise ex attr = getattr(self.asynchronous, key) if hasattr(attr, '__call__'): def wrap(*args, **kwargs): # Overload the ioloop for the func call-- since it might call .current() with current_ioloop(self.io_loop): ret = attr(*args, **kwargs) if isinstance(ret, tornado.concurrent.Future): ret = self._block_future(ret) return ret return wrap else: return attr def _block_future(self, future): self.io_loop.add_future(future, lambda future: self.io_loop.stop()) self.io_loop.start() return future.result() def __del__(self): ''' On deletion of the asynchronous wrapper, make sure to clean up the asynchronous stuff ''' if hasattr(self, 'asynchronous'): if hasattr(self.asynchronous, 'close'): # Certain things such as streams should be closed before # their associated io_loop is closed to allow for proper # cleanup. 
self.asynchronous.close() elif hasattr(self.asynchronous, 'destroy'): # Certain things such as streams should be closed before # their associated io_loop is closed to allow for proper # cleanup. self.asynchronous.destroy() del self.asynchronous self.io_loop.close() del self.io_loop elif hasattr(self, 'io_loop'): self.io_loop.close() del self.io_loop
saltstack/salt
salt/modules/saltutil.py
_get_top_file_envs
python
def _get_top_file_envs():
    '''
    Get all environments from the top file
    '''
    cache_key = 'saltutil._top_file_envs'
    if cache_key in __context__:
        # Rendering the top file is expensive; reuse the cached answer.
        return __context__[cache_key]
    try:
        highstate = salt.state.HighState(__opts__, initial_pillar=__pillar__)
        top = highstate.get_top()
        envs = 'base'
        if top:
            # Fall back to 'base' when the top file matches nothing.
            envs = list(highstate.top_matches(top).keys()) or 'base'
    except SaltRenderError as exc:
        raise CommandExecutionError(
            'Unable to render top file(s): {0}'.format(exc)
        )
    __context__[cache_key] = envs
    return envs
Get all environments from the top file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L78-L98
null
# -*- coding: utf-8 -*- ''' The Saltutil module is used to manage the state of the salt minion itself. It is used to manage minion modules as well as automate updates to the salt minion. :depends: - esky Python module for update functionality ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import fnmatch import logging import os import signal import sys import time import shutil # Import 3rd-party libs # pylint: disable=import-error try: import esky from esky import EskyVersionError HAS_ESKY = True except ImportError: HAS_ESKY = False # pylint: disable=no-name-in-module from salt.ext import six from salt.ext.six.moves.urllib.error import URLError # pylint: enable=import-error,no-name-in-module # Fix a nasty bug with Win32 Python not supporting all of the standard signals try: salt_SIGKILL = signal.SIGKILL except AttributeError: salt_SIGKILL = signal.SIGTERM # Import salt libs import salt import salt.config import salt.client import salt.client.ssh.client import salt.defaults.events import salt.payload import salt.runner import salt.state import salt.utils.args import salt.utils.event import salt.utils.extmods import salt.utils.files import salt.utils.functools import salt.utils.master import salt.utils.minion import salt.utils.path import salt.utils.process import salt.utils.url import salt.wheel import salt.transport.client HAS_PSUTIL = True try: import salt.utils.psutil_compat except ImportError: HAS_PSUTIL = False from salt.exceptions import ( SaltReqTimeoutError, SaltRenderError, CommandExecutionError, SaltInvocationError ) __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def _sync(form, saltenv=None, extmod_whitelist=None, extmod_blacklist=None): ''' Sync the given directory in the given environment ''' if saltenv is None: saltenv = _get_top_file_envs() if isinstance(saltenv, six.string_types): saltenv = saltenv.split(',') ret, touched = salt.utils.extmods.sync(__opts__, form, 
saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist) # Dest mod_dir is touched? trigger reload if requested if touched: mod_file = os.path.join(__opts__['cachedir'], 'module_refresh') with salt.utils.files.fopen(mod_file, 'a'): pass if form == 'grains' and \ __opts__.get('grains_cache') and \ os.path.isfile(os.path.join(__opts__['cachedir'], 'grains.cache.p')): try: os.remove(os.path.join(__opts__['cachedir'], 'grains.cache.p')) except OSError: log.error('Could not remove grains cache!') return ret def update(version=None): ''' Update the salt minion from the URL defined in opts['update_url'] SaltStack, Inc provides the latest builds here: update_url: https://repo.saltstack.com/windows/ Be aware that as of 2014-8-11 there's a bug in esky such that only the latest version available in the update_url can be downloaded and installed. This feature requires the minion to be running a bdist_esky build. The version number is optional and will default to the most recent version available at opts['update_url']. Returns details about the transaction upon completion. CLI Examples: .. code-block:: bash salt '*' saltutil.update salt '*' saltutil.update 0.10.3 ''' ret = {} if not HAS_ESKY: ret['_error'] = 'Esky not available as import' return ret if not getattr(sys, 'frozen', False): ret['_error'] = 'Minion is not running an Esky build' return ret if not __salt__['config.option']('update_url'): ret['_error'] = '"update_url" not configured on this minion' return ret app = esky.Esky(sys.executable, __opts__['update_url']) oldversion = __grains__['saltversion'] if not version: try: version = app.find_update() except URLError as exc: ret['_error'] = 'Could not connect to update_url. Error: {0}'.format(exc) return ret if not version: ret['_error'] = 'No updates available' return ret try: app.fetch_version(version) except EskyVersionError as exc: ret['_error'] = 'Unable to fetch version {0}. 
Error: {1}'.format(version, exc) return ret try: app.install_version(version) except EskyVersionError as exc: ret['_error'] = 'Unable to install version {0}. Error: {1}'.format(version, exc) return ret try: app.cleanup() except Exception as exc: ret['_error'] = 'Unable to cleanup. Error: {0}'.format(exc) restarted = {} for service in __opts__['update_restart_services']: restarted[service] = __salt__['service.restart'](service) ret['comment'] = 'Updated from {0} to {1}'.format(oldversion, version) ret['restarted'] = restarted return ret def sync_beacons(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2015.5.1 Sync beacons from ``salt://_beacons`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for beacons to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available beacons on the minion. This refresh will be performed even if no new beacons are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Example: .. code-block:: bash salt '*' saltutil.sync_beacons salt '*' saltutil.sync_beacons saltenv=dev salt '*' saltutil.sync_beacons saltenv=base,dev ''' ret = _sync('beacons', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_beacons() return ret def sync_sdb(saltenv=None, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2015.5.8,2015.8.3 Sync sdb modules from ``salt://_sdb`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. 
If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for sdb modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : False This argument has no affect and is included for consistency with the other sync functions. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Example: .. code-block:: bash salt '*' saltutil.sync_sdb salt '*' saltutil.sync_sdb saltenv=dev salt '*' saltutil.sync_sdb saltenv=base,dev ''' ret = _sync('sdb', saltenv, extmod_whitelist, extmod_blacklist) return ret def sync_modules(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 0.10.0 Sync execution modules from ``salt://_modules`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for execution modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new execution modules are synced. Set to ``False`` to prevent this refresh. .. important:: If this function is executed using a :py:func:`module.run <salt.states.module.run>` state, the SLS file will not have access to newly synced execution modules unless a ``refresh`` argument is added to the state, like so: .. code-block:: yaml load_my_custom_module: module.run: - name: saltutil.sync_modules - refresh: True See :ref:`here <reloading-modules>` for a more detailed explanation of why this is necessary. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Example: .. 
code-block:: bash salt '*' saltutil.sync_modules salt '*' saltutil.sync_modules saltenv=dev salt '*' saltutil.sync_modules saltenv=base,dev ''' ret = _sync('modules', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_states(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 0.10.0 Sync state modules from ``salt://_states`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for state modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available states on the minion. This refresh will be performed even if no new state modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_states salt '*' saltutil.sync_states saltenv=dev salt '*' saltutil.sync_states saltenv=base,dev ''' ret = _sync('states', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def refresh_grains(**kwargs): ''' .. versionadded:: 2016.3.6,2016.11.4,2017.7.0 Refresh the minion's grains without syncing custom grains modules from ``salt://_grains``. .. note:: The available execution modules will be reloaded as part of this proceess, as grains can affect which modules are available. refresh_pillar : True Set to ``False`` to keep pillar data from being refreshed. CLI Examples: .. 
code-block:: bash salt '*' saltutil.refresh_grains ''' kwargs = salt.utils.args.clean_kwargs(**kwargs) _refresh_pillar = kwargs.pop('refresh_pillar', True) if kwargs: salt.utils.args.invalid_kwargs(kwargs) # Modules and pillar need to be refreshed in case grains changes affected # them, and the module refresh process reloads the grains and assigns the # newly-reloaded grains to each execution module's __grains__ dunder. refresh_modules() if _refresh_pillar: refresh_pillar() return True def sync_grains(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 0.10.0 Sync grains modules from ``salt://_grains`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for grains modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules and recompile pillar data for the minion. This refresh will be performed even if no new grains modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_grains salt '*' saltutil.sync_grains saltenv=dev salt '*' saltutil.sync_grains saltenv=base,dev ''' ret = _sync('grains', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() refresh_pillar() return ret def sync_renderers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 0.10.0 Sync renderers from ``salt://_renderers`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. 
If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for renderers to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new renderers are synced. Set to ``False`` to prevent this refresh. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_renderers salt '*' saltutil.sync_renderers saltenv=dev salt '*' saltutil.sync_renderers saltenv=base,dev ''' ret = _sync('renderers', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_returners(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 0.10.0 Sync returners from ``salt://_returners`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for returners to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new returners are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. 
code-block:: bash salt '*' saltutil.sync_returners salt '*' saltutil.sync_returners saltenv=dev ''' ret = _sync('returners', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_proxymodules(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2015.8.2 Sync proxy modules from ``salt://_proxy`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for proxy modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new proxy modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_proxymodules salt '*' saltutil.sync_proxymodules saltenv=dev salt '*' saltutil.sync_proxymodules saltenv=base,dev ''' ret = _sync('proxy', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_matchers(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2019.2.0 Sync engine modules from ``salt://_matchers`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for engines to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. 
This refresh will be performed even if no new matcher modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-separated list of modules to sync extmod_blacklist : None comma-separated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_matchers salt '*' saltutil.sync_matchers saltenv=base,dev ''' ret = _sync('matchers', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_engines(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2016.3.0 Sync engine modules from ``salt://_engines`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for engines to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new engine modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_engines salt '*' saltutil.sync_engines saltenv=base,dev ''' ret = _sync('engines', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_thorium(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2018.3.0 Sync Thorium modules from ``salt://_thorium`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for engines to sync. 
If no top files are found, then the ``base`` environment will be synced. refresh: ``True`` If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new Thorium modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist comma-seperated list of modules to sync extmod_blacklist comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_thorium salt '*' saltutil.sync_thorium saltenv=base,dev ''' ret = _sync('thorium', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_output(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' Sync outputters from ``salt://_output`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for outputters to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new outputters are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_output salt '*' saltutil.sync_output saltenv=dev salt '*' saltutil.sync_output saltenv=base,dev ''' ret = _sync('output', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret sync_outputters = salt.utils.functools.alias_function(sync_output, 'sync_outputters') def sync_clouds(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. 
versionadded:: 2017.7.0 Sync cloud modules from ``salt://_cloud`` to the minion saltenv : base The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new utility modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_clouds salt '*' saltutil.sync_clouds saltenv=dev salt '*' saltutil.sync_clouds saltenv=base,dev ''' ret = _sync('clouds', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_utils(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2014.7.0 Sync utility modules from ``salt://_utils`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for utility modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new utility modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. 
code-block:: bash salt '*' saltutil.sync_utils salt '*' saltutil.sync_utils saltenv=dev salt '*' saltutil.sync_utils saltenv=base,dev ''' ret = _sync('utils', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_serializers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2019.2.0 Sync serializers from ``salt://_serializers`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for serializer modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new serializer modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_serializers salt '*' saltutil.sync_serializers saltenv=dev salt '*' saltutil.sync_serializers saltenv=base,dev ''' ret = _sync('serializers', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_executors(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: Neon Sync executors from ``salt://_executors`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for executor modules to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. 
This refresh will be performed even if no new serializer modules are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_executors salt '*' saltutil.sync_executors saltenv=dev salt '*' saltutil.sync_executors saltenv=base,dev ''' ret = _sync('executors', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def list_extmods(): ''' .. versionadded:: 2017.7.0 List Salt modules which have been synced externally CLI Examples: .. code-block:: bash salt '*' saltutil.list_extmods ''' ret = {} ext_dir = os.path.join(__opts__['cachedir'], 'extmods') mod_types = os.listdir(ext_dir) for mod_type in mod_types: ret[mod_type] = set() for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)): for fh_ in files: ret[mod_type].add(fh_.split('.')[0]) ret[mod_type] = list(ret[mod_type]) return ret def sync_log_handlers(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2015.8.0 Sync log handlers from ``salt://_log_handlers`` to the minion saltenv The fileserver environment from which to sync. To sync from more than one environment, pass a comma-separated list. If not passed, then all environments configured in the :ref:`top files <states-top>` will be checked for log handlers to sync. If no top files are found, then the ``base`` environment will be synced. refresh : True If ``True``, refresh the available execution modules on the minion. This refresh will be performed even if no new log handlers are synced. Set to ``False`` to prevent this refresh. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type CLI Examples: .. 
code-block:: bash salt '*' saltutil.sync_log_handlers salt '*' saltutil.sync_log_handlers saltenv=dev salt '*' saltutil.sync_log_handlers saltenv=base,dev ''' ret = _sync('log_handlers', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() return ret def sync_pillar(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionadded:: 2015.8.11,2016.3.2 Sync pillar modules from the ``salt://_pillar`` directory on the Salt fileserver. This function is environment-aware, pass the desired environment to grab the contents of the ``_pillar`` directory from that environment. The default environment, if none is specified, is ``base``. refresh : True Also refresh the execution modules available to the minion, and refresh pillar data. extmod_whitelist : None comma-seperated list of modules to sync extmod_blacklist : None comma-seperated list of modules to blacklist based on type .. note:: This function will raise an error if executed on a traditional (i.e. not masterless) minion CLI Examples: .. code-block:: bash salt '*' saltutil.sync_pillar salt '*' saltutil.sync_pillar saltenv=dev ''' if __opts__['file_client'] != 'local': raise CommandExecutionError( 'Pillar modules can only be synced to masterless minions' ) ret = _sync('pillar', saltenv, extmod_whitelist, extmod_blacklist) if refresh: refresh_modules() refresh_pillar() return ret def sync_all(saltenv=None, refresh=True, extmod_whitelist=None, extmod_blacklist=None): ''' .. versionchanged:: 2015.8.11,2016.3.2 On masterless minions, pillar modules are now synced, and refreshed when ``refresh`` is set to ``True``. Sync down all of the dynamic modules from the file server for a specific environment. This function synchronizes custom modules, states, beacons, grains, returners, output modules, renderers, and utils. refresh : True Also refresh the execution modules and recompile pillar data available to the minion. 
This refresh will be performed even if no new dynamic modules are synced. Set to ``False`` to prevent this refresh. .. important:: If this function is executed using a :py:func:`module.run <salt.states.module.run>` state, the SLS file will not have access to newly synced execution modules unless a ``refresh`` argument is added to the state, like so: .. code-block:: yaml load_my_custom_module: module.run: - name: saltutil.sync_all - refresh: True See :ref:`here <reloading-modules>` for a more detailed explanation of why this is necessary. extmod_whitelist : None dictionary of modules to sync based on type extmod_blacklist : None dictionary of modules to blacklist based on type CLI Examples: .. code-block:: bash salt '*' saltutil.sync_all salt '*' saltutil.sync_all saltenv=dev salt '*' saltutil.sync_all saltenv=base,dev salt '*' saltutil.sync_all extmod_whitelist={'modules': ['custom_module']} ''' log.debug('Syncing all') ret = {} ret['clouds'] = sync_clouds(saltenv, False, extmod_whitelist, extmod_blacklist) ret['beacons'] = sync_beacons(saltenv, False, extmod_whitelist, extmod_blacklist) ret['modules'] = sync_modules(saltenv, False, extmod_whitelist, extmod_blacklist) ret['states'] = sync_states(saltenv, False, extmod_whitelist, extmod_blacklist) ret['sdb'] = sync_sdb(saltenv, extmod_whitelist, extmod_blacklist) ret['grains'] = sync_grains(saltenv, False, extmod_whitelist, extmod_blacklist) ret['renderers'] = sync_renderers(saltenv, False, extmod_whitelist, extmod_blacklist) ret['returners'] = sync_returners(saltenv, False, extmod_whitelist, extmod_blacklist) ret['output'] = sync_output(saltenv, False, extmod_whitelist, extmod_blacklist) ret['utils'] = sync_utils(saltenv, False, extmod_whitelist, extmod_blacklist) ret['log_handlers'] = sync_log_handlers(saltenv, False, extmod_whitelist, extmod_blacklist) ret['proxymodules'] = sync_proxymodules(saltenv, False, extmod_whitelist, extmod_blacklist) ret['engines'] = sync_engines(saltenv, False, extmod_whitelist, 
                                    extmod_blacklist)
    ret['thorium'] = sync_thorium(saltenv, False, extmod_whitelist, extmod_blacklist)
    ret['serializers'] = sync_serializers(saltenv, False, extmod_whitelist, extmod_blacklist)
    ret['matchers'] = sync_matchers(saltenv, False, extmod_whitelist, extmod_blacklist)
    ret['executors'] = sync_executors(saltenv, False, extmod_whitelist, extmod_blacklist)
    # Pillar modules only exist locally when using the local file client
    if __opts__['file_client'] == 'local':
        ret['pillar'] = sync_pillar(saltenv, False, extmod_whitelist, extmod_blacklist)
    if refresh:
        # Reload modules (and grains) and pillar after the sync so the new
        # extension modules take effect immediately
        refresh_modules()
        refresh_pillar()
    return ret


def refresh_beacons():
    '''
    Signal the minion to refresh the beacons.

    Returns the result of firing the ``beacons_refresh`` event on the
    minion's event bus, or ``False`` if the event module is unavailable.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.refresh_beacons
    '''
    try:
        ret = __salt__['event.fire']({}, 'beacons_refresh')
    except KeyError:
        # NOTE(review): message says "Module refresh" although this is the
        # beacons refresh path — looks like upstream copy/paste; confirm
        # before changing the runtime string.
        log.error('Event module not available. Module refresh failed.')
        ret = False  # Effectively a no-op, since we can't really return without an event system
    return ret


def refresh_matchers():
    '''
    Signal the minion to refresh its matchers.

    Returns the result of firing the ``matchers_refresh`` event on the
    minion's event bus, or ``False`` if the event module is unavailable.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.refresh_matchers
    '''
    try:
        ret = __salt__['event.fire']({}, 'matchers_refresh')
    except KeyError:
        log.error('Event module not available. Matcher refresh failed.')
        ret = False  # Effectively a no-op, since we can't really return without an event system
    return ret


def refresh_pillar(**kwargs):
    '''
    Signal the minion to refresh the pillar data.

    .. versionchanged:: Neon
        The ``async`` argument has been added. The default value is True.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.refresh_pillar
        salt '*' saltutil.refresh_pillar async=False
    '''
    # ``async`` is a Python keyword (3.7+), so it must arrive via **kwargs
    asynchronous = bool(kwargs.get('async', True))
    try:
        if asynchronous:
            #  If we're going to block, first setup a listener
            ret = __salt__['event.fire']({}, 'pillar_refresh')
        else:
            # Blocking path: set up the listener *before* firing the event so
            # the completion event cannot be missed
            eventer = salt.utils.event.get_event(
                'minion', opts=__opts__, listen=True)
            ret = __salt__['event.fire']({'notify': True}, 'pillar_refresh')
            # Wait for the finish event to fire
            log.trace('refresh_pillar waiting for pillar refresh to complete')
            # Blocks until we hear this event or until the timeout expires
            # (wait=30 — presumably seconds; confirm against salt.utils.event)
            eventer.get_event(
                tag=salt.defaults.events.MINION_PILLAR_COMPLETE, wait=30)
    except KeyError:
        log.error('Event module not available. Pillar refresh failed.')
        ret = False  # Effectively a no-op, since we can't really return without an event system
    return ret


pillar_refresh = salt.utils.functools.alias_function(refresh_pillar, 'pillar_refresh')


def refresh_modules(**kwargs):
    '''
    Signal the minion to refresh the module and grain data

    The default is to refresh module asynchronously. To block
    until the module refresh is complete, set the 'async' flag to False.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.refresh_modules
    '''
    asynchronous = bool(kwargs.get('async', True))
    try:
        if asynchronous:
            #  If we're going to block, first setup a listener
            ret = __salt__['event.fire']({}, 'module_refresh')
        else:
            eventer = salt.utils.event.get_event(
                'minion', opts=__opts__, listen=True)
            ret = __salt__['event.fire']({'notify': True}, 'module_refresh')
            # Wait for the finish event to fire
            log.trace('refresh_modules waiting for module refresh to complete')
            # Blocks until we hear this event or until the timeout expires
            eventer.get_event(
                tag=salt.defaults.events.MINION_MOD_COMPLETE, wait=30)
    except KeyError:
        log.error('Event module not available. Module refresh failed.')
        ret = False  # Effectively a no-op, since we can't really return without an event system
    return ret


def is_running(fun):
    '''
    If the named function is running return the data associated with it/them.
    The argument can be a glob

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.is_running state.highstate
    '''
    run = running()
    ret = []
    for data in run:
        # Glob-match the requested function name against each running job
        if fnmatch.fnmatch(data.get('fun', ''), fun):
            ret.append(data)
    return ret


def running():
    '''
    Return the data on all running salt processes on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.running
    '''
    return salt.utils.minion.running(__opts__)


def clear_cache(days=-1):
    '''
    Forcibly removes all caches on a minion.

    .. versionadded:: 2014.7.0

    WARNING: The safest way to clear a minion cache is by first stopping
    the minion and then deleting the cache files before restarting it.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.clear_cache days=7
    '''
    # Age cutoff as a Unix timestamp: files modified before this are removed.
    # The default of days=-1 makes the threshold lie in the future, so every
    # cache file is older than it and gets removed.
    threshold = time.time() - days * 24 * 60 * 60
    for root, dirs, files in salt.utils.files.safe_walk(__opts__['cachedir'], followlinks=False):
        for name in files:
            try:
                file = os.path.join(root, name)
                mtime = os.path.getmtime(file)
                if mtime < threshold:
                    os.remove(file)
            except OSError as exc:
                log.error(
                    'Attempt to clear cache with saltutil.clear_cache '
                    'FAILED with: %s', exc
                )
                return False
    return True


def clear_job_cache(hours=24):
    '''
    Forcibly removes job cache folders and files on a minion.

    .. versionadded:: 2018.3.0

    WARNING: The safest way to clear a minion cache is by first stopping
    the minion and then deleting the cache files before restarting it.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.clear_job_cache hours=12
    '''
    # Age cutoff as a Unix timestamp; job dirs modified before this are removed
    threshold = time.time() - hours * 60 * 60
    for root, dirs, files in salt.utils.files.safe_walk(os.path.join(__opts__['cachedir'], 'minion_jobs'),
                                                        followlinks=False):
        for name in dirs:
            try:
                directory = os.path.join(root, name)
                mtime = os.path.getmtime(directory)
                if mtime < threshold:
                    shutil.rmtree(directory)
            except OSError as exc:
                log.error('Attempt to clear cache with saltutil.clear_job_cache FAILED with: %s', exc)
                return False
    return True


def find_job(jid):
    '''
    Return the data for a specific job id that is currently running.

    jid
        The job id to search for and return data.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.find_job <job id>

    Note that the find_job function only returns job information when the job
    is still running. If the job is currently running, the output looks
    something like this:

    .. code-block:: bash

        # salt my-minion saltutil.find_job 20160503150049487736
        my-minion:
            ----------
            arg:
                - 30
            fun:
                test.sleep
            jid:
                20160503150049487736
            pid:
                9601
            ret:
            tgt:
                my-minion
            tgt_type:
                glob
            user:
                root

    If the job has already completed, the job cannot be found and therefore
    the function returns an empty dictionary, which looks like this on the
    CLI:

    .. code-block:: bash

        # salt my-minion saltutil.find_job 20160503150049487736
        my-minion:
            ----------
    '''
    for data in running():
        if data['jid'] == jid:
            return data
    # Job not currently running (it may have already completed)
    return {}


def find_cached_job(jid):
    '''
    Return the data for a specific cached job id. Note this only works if
    cache_jobs has previously been set to True on the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.find_cached_job <job id>
    '''
    serial = salt.payload.Serial(__opts__)
    proc_dir = os.path.join(__opts__['cachedir'], 'minion_jobs')
    job_dir = os.path.join(proc_dir, six.text_type(jid))
    if not os.path.isdir(job_dir):
        # Distinguish "caching disabled" from "this particular jid not cached"
        if not __opts__.get('cache_jobs'):
            return ('Local jobs cache directory not found; you may need to'
                    ' enable cache_jobs on this minion')
        else:
            return 'Local jobs cache directory {0} not found'.format(job_dir)
    path = os.path.join(job_dir, 'return.p')
    with salt.utils.files.fopen(path, 'rb') as fp_:
        buf = fp_.read()
    if buf:
        try:
            data = serial.loads(buf)
        except NameError:
            # msgpack error in salt-ssh
            pass
        else:
            if isinstance(data, dict):
                # if not a dict, this was an invalid serialized object
                return data
    return None


def signal_job(jid, sig):
    '''
    Sends a signal to the named salt job's process

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.signal_job <job id> 15
    '''
    if HAS_PSUTIL is False:
        log.warning('saltutil.signal job called, but psutil is not installed. '
                    'Install psutil to ensure more reliable and accurate PID '
                    'management.')
    for data in running():
        if data['jid'] == jid:
            try:
                if HAS_PSUTIL:
                    # Signal the job's whole process tree, children first
                    for proc in salt.utils.psutil_compat.Process(pid=data['pid']).children(recursive=True):
                        proc.send_signal(sig)
                os.kill(int(data['pid']), sig)
                if HAS_PSUTIL is False and 'child_pids' in data:
                    # Fallback child handling when psutil is unavailable
                    for pid in data['child_pids']:
                        os.kill(int(pid), sig)
                return 'Signal {0} sent to job {1} at pid {2}'.format(
                    int(sig),
                    jid,
                    data['pid']
                )
            except OSError:
                # Process is already gone; clean up the stale proc file
                path = os.path.join(__opts__['cachedir'],
                                    'proc',
                                    six.text_type(jid))
                if os.path.isfile(path):
                    os.remove(path)
                return ('Job {0} was not running and job data has been '
                        ' cleaned up').format(jid)
    return ''


def term_job(jid):
    '''
    Sends a termination signal (SIGTERM 15) to the named salt job's process

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.term_job <job id>
    '''
    return signal_job(jid, signal.SIGTERM)


def term_all_jobs():
    '''
    Sends a termination signal (SIGTERM 15) to all currently running jobs

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.term_all_jobs
    '''
    ret = []
    for data in running():
        ret.append(signal_job(data['jid'], signal.SIGTERM))
    return ret


def kill_job(jid):
    '''
    Sends a kill signal (SIGKILL 9) to the named salt job's process

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.kill_job <job id>
    '''
    # Some OS's (Win32) don't have SIGKILL, so use salt_SIGKILL which is set to
    # an appropriate value for the operating system this is running on.
    return signal_job(jid, salt_SIGKILL)


def kill_all_jobs():
    '''
    Sends a kill signal (SIGKILL 9) to all currently running jobs

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.kill_all_jobs
    '''
    # Some OS's (Win32) don't have SIGKILL, so use salt_SIGKILL which is set to
    # an appropriate value for the operating system this is running on.
    ret = []
    for data in running():
        ret.append(signal_job(data['jid'], salt_SIGKILL))
    return ret


def regen_keys():
    '''
    Used to regenerate the minion keys.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.regen_keys
    '''
    # Delete every existing key file in the minion's pki directory
    for fn_ in os.listdir(__opts__['pki_dir']):
        path = os.path.join(__opts__['pki_dir'], fn_)
        try:
            os.remove(path)
        except os.error:
            pass
    # TODO: move this into a channel function? Or auth?
    # create a channel again, this will force the key regen
    channel = salt.transport.client.ReqChannel.factory(__opts__)
    channel.close()


def revoke_auth(preserve_minion_cache=False):
    '''
    The minion sends a request to the master to revoke its own key.
    Note that the minion session will be revoked and the minion may
    not be able to return the result of this command back to the master.

    If the 'preserve_minion_cache' flag is set to True, the master
    cache for this minion will not be removed.

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.revoke_auth
    '''
    masters = list()
    ret = True
    # Multi-master setups revoke against every configured master
    if 'master_uri_list' in __opts__:
        for master_uri in __opts__['master_uri_list']:
            masters.append(master_uri)
    else:
        masters.append(__opts__['master_uri'])

    for master in masters:
        channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master)
        tok = channel.auth.gen_token(b'salt')
        load = {'cmd': 'revoke_auth',
                'id': __opts__['id'],
                'tok': tok,
                'preserve_minion_cache': preserve_minion_cache}
        try:
            channel.send(load)
        except SaltReqTimeoutError:
            # A timeout on any master marks the whole revocation as failed,
            # but the remaining masters are still attempted
            ret = False
        finally:
            channel.close()
    return ret


def _get_ssh_or_api_client(cfgfile, ssh=False):
    '''
    Return an SSHClient when ``ssh`` is True, otherwise a regular local
    client, both built from the given config file path.
    '''
    if ssh:
        client = salt.client.ssh.client.SSHClient(cfgfile)
    else:
        client = salt.client.get_local_client(cfgfile)
    return client


def _exec(client, tgt, fun, arg, timeout, tgt_type, ret, kwarg, **kwargs):
    '''
    Dispatch ``fun`` through ``client``, choosing batch, subset, async
    (run_job) or plain iterating execution based on the keys present in
    ``kwargs``. Returns the aggregated per-minion results, or the run_job
    data for the asynchronous case.
    '''
    fcn_ret = {}
    seen = 0
    if 'batch' in kwargs:
        _cmd = client.cmd_batch
        cmd_kwargs = {
            'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type,
            'ret': ret, 'kwarg': kwarg, 'batch': kwargs['batch'],
        }
        del kwargs['batch']
    elif 'subset' in kwargs:
        _cmd = client.cmd_subset
        cmd_kwargs = {
            'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type,
            'ret': ret, 'cli': True, 'kwarg': kwarg, 'sub': kwargs['subset'],
        }
        del kwargs['subset']
    elif kwargs.get('asynchronous'):
        cmd_kwargs = {
            'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type,
            'ret': ret, 'kwarg': kwarg
        }
        del kwargs['asynchronous']
        # run_job doesnt need processing like the others
        return client.run_job(**cmd_kwargs)
    else:
        _cmd = client.cmd_iter
        cmd_kwargs = {
            'tgt': tgt, 'fun': fun, 'arg': arg, 'timeout': timeout,
            'tgt_type': tgt_type, 'ret': ret, 'kwarg': kwarg,
        }
    # Remaining caller kwargs are forwarded to the client call as-is
    cmd_kwargs.update(kwargs)
    for ret_comp in _cmd(**cmd_kwargs):
        fcn_ret.update(ret_comp)
        seen += 1
        # fcn_ret can be empty, so we cannot len the whole return dict
        if tgt_type == 'list' and len(tgt) == seen:
            # do not wait for timeout when explicit list matching
            # and all results are there
            break
    return fcn_ret


def cmd(tgt,
        fun,
        arg=(),
        timeout=None,
        tgt_type='glob',
        ret='',
        kwarg=None,
        ssh=False,
        **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Assuming this minion is a master, execute a salt command

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.cmd
    '''
    cfgfile = __opts__['conf_file']
    client = _get_ssh_or_api_client(cfgfile, ssh)
    fcn_ret = _exec(
        client, tgt, fun, arg, timeout, tgt_type, ret, kwarg, **kwargs)
    # if return is empty, we may have not used the right conf,
    # try with the 'minion relative master configuration counter part
    # if available
    master_cfgfile = '{0}master'.format(cfgfile[:-6])  # remove 'minion'
    if (
            not fcn_ret
            and cfgfile.endswith('{0}{1}'.format(os.path.sep, 'minion'))
            and os.path.exists(master_cfgfile)
    ):
        client = _get_ssh_or_api_client(master_cfgfile, ssh)
        fcn_ret = _exec(
            client, tgt, fun, arg, timeout, tgt_type, ret, kwarg, **kwargs)

    if 'batch' in kwargs:
        # Normalize batch results into the {'out': ..., 'ret': ...} shape
        # returned by the other execution paths
        old_ret, fcn_ret = fcn_ret, {}
        for key, value in old_ret.items():
            fcn_ret[key] = {
                'out': value.get('out', 'highstate') if isinstance(value, dict) else 'highstate',
                'ret': value,
            }

    return fcn_ret


def cmd_iter(tgt,
             fun,
             arg=(),
             timeout=None,
             tgt_type='glob',
             ret='',
             kwarg=None,
             ssh=False,
             **kwargs):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Assuming this minion is a master, execute a salt command

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.cmd_iter
    '''
    if ssh:
        client = salt.client.ssh.client.SSHClient(__opts__['conf_file'])
    else:
        client = salt.client.get_local_client(__opts__['conf_file'])
    for ret in client.cmd_iter(
            tgt,
            fun,
            arg,
            timeout,
            tgt_type,
            ret,
            kwarg,
            **kwargs):
        yield ret


def runner(name,
           arg=None,
           kwarg=None,
           full_return=False,
           saltenv='base',
           jid=None,
           asynchronous=False,
           **kwargs):
    '''
    Execute a runner function. This function must be run on the master,
    either by targeting a minion running on a master or by using
    salt-call on a master.

    .. versionadded:: 2014.7.0

    name
        The name of the function to run

    kwargs
        Any keyword arguments to pass to the runner function

    asynchronous
        Run the salt command but don't wait for a reply.

        .. versionadded:: neon

    CLI Example:

    In this example, assume that `master_minion` is a minion running
    on a master.

    .. code-block:: bash

        salt master_minion saltutil.runner jobs.list_jobs
        salt master_minion saltutil.runner test.arg arg="['baz']" kwarg="{'foo': 'bar'}"
    '''
    if arg is None:
        arg = []
    if kwarg is None:
        kwarg = {}
    jid = kwargs.pop('__orchestration_jid__', jid)
    saltenv = kwargs.pop('__env__', saltenv)
    # Strip __pub_* keys; anything left is treated as a runner kwarg
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if kwargs:
        kwarg.update(kwargs)

    if 'master_job_cache' not in __opts__:
        # Running on a plain minion: load the master config that lives next
        # to this minion's config file
        master_config = os.path.join(os.path.dirname(__opts__['conf_file']),
                                     'master')
        master_opts = salt.config.master_config(master_config)
        rclient = salt.runner.RunnerClient(master_opts)
    else:
        rclient = salt.runner.RunnerClient(__opts__)

    if name in rclient.functions:
        aspec = salt.utils.args.get_function_argspec(rclient.functions[name])
        if 'saltenv' in aspec.args:
            kwarg['saltenv'] = saltenv

    if name in ['state.orchestrate', 'state.orch', 'state.sls']:
        kwarg['orchestration_jid'] = jid

    if jid:
        salt.utils.event.fire_args(
            __opts__,
            jid,
            {'type': 'runner', 'name': name, 'args': arg, 'kwargs': kwarg},
            prefix='run'
        )

    if asynchronous:
        master_key = salt.utils.master.get_master_key('root', __opts__)
        low = {'arg': arg,
               'kwarg': kwarg,
               'fun': name,
               'key': master_key}
        return rclient.cmd_async(low)
    else:
        return rclient.cmd(name,
                           arg=arg,
                           kwarg=kwarg,
                           print_event=False,
                           full_return=full_return)


def wheel(name, *args, **kwargs):
    '''
    Execute a wheel module and function. This function must be run against a
    minion that is local to the master.

    .. versionadded:: 2014.7.0

    name
        The name of the function to run

    args
        Any positional arguments to pass to the wheel function. A common
        example of this would be the ``match`` arg needed for key functions.

        .. versionadded:: v2015.8.11

    kwargs
        Any keyword arguments to pass to the wheel function

    asynchronous
        Run the salt command but don't wait for a reply.

        .. versionadded:: neon

    CLI Example:

    .. code-block:: bash

        salt my-local-minion saltutil.wheel key.accept jerry
        salt my-local-minion saltutil.wheel minions.connected

    .. note::
        Since this function must be run against a minion that is running
        locally on the master in order to get accurate returns, if this
        function is run against minions that are not local to the master,
        "empty" returns are expected. The remote minion does not have access
        to wheel functions and their return data.
    '''
    jid = kwargs.pop('__orchestration_jid__', None)
    saltenv = kwargs.pop('__env__', 'base')

    if __opts__['__role'] == 'minion':
        master_config = os.path.join(os.path.dirname(__opts__['conf_file']),
                                     'master')
        master_opts = salt.config.client_config(master_config)
        wheel_client = salt.wheel.WheelClient(master_opts)
    else:
        wheel_client = salt.wheel.WheelClient(__opts__)

    # The WheelClient cmd needs args, kwargs, and pub_data separated out from
    # the "normal" kwargs structure, which at this point contains __pub_x keys.
    pub_data = {}
    valid_kwargs = {}
    for key, val in six.iteritems(kwargs):
        if key.startswith('__'):
            pub_data[key] = val
        else:
            valid_kwargs[key] = val

    try:
        if name in wheel_client.functions:
            aspec = salt.utils.args.get_function_argspec(
                wheel_client.functions[name]
            )
            if 'saltenv' in aspec.args:
                valid_kwargs['saltenv'] = saltenv

        if jid:
            salt.utils.event.fire_args(
                __opts__,
                jid,
                {'type': 'wheel', 'name': name, 'args': valid_kwargs},
                prefix='run'
            )

        # NOTE(review): 'asynchronous' was already copied into valid_kwargs by
        # the split above (it does not start with '__'), so popping it here
        # only removes it from the original kwargs dict — verify intent.
        if kwargs.pop('asynchronous', False):
            master_key = salt.utils.master.get_master_key('root', __opts__)
            low = {'arg': args,
                   'kwarg': kwargs,
                   'fun': name,
                   'key': master_key}
            ret = wheel_client.cmd_async(low)
        else:
            ret = wheel_client.cmd(name,
                                   arg=args,
                                   pub_data=pub_data,
                                   kwarg=valid_kwargs,
                                   print_event=False,
                                   full_return=True)
    except SaltInvocationError as e:
        raise CommandExecutionError(
            'This command can only be executed on a minion that is located on '
            'the master.'
        )

    return ret


# this is the only way I could figure out how to get the REAL file_roots
# __opt__['file_roots'] is set to __opt__['pillar_root']
class _MMinion(object):
    def __new__(cls, saltenv, reload_env=False):
        # this is to break out of salt.loaded.int and make this a true
        # singleton hack until https://github.com/saltstack/salt/pull/10273
        # is resolved this is starting to look like PHP
        global _mminions  # pylint: disable=W0601
        if '_mminions' not in globals():
            _mminions = {}
        if saltenv not in _mminions or reload_env:
            opts = copy.deepcopy(__opts__)
            del opts['file_roots']
            # grains at this point are in the context of the minion
            global __grains__  # pylint: disable=W0601
            grains = copy.deepcopy(__grains__)
            m = salt.minion.MasterMinion(opts)

            # this assignment is so that the rest of fxns called by salt still
            # have minion context
            __grains__ = grains

            # this assignment is so that fxns called by mminion have minion
            # context
            m.opts['grains'] = grains

            env_roots = m.opts['file_roots'][saltenv]
            m.opts['module_dirs'] = [fp + '/_modules' for fp in env_roots]
            m.gen_modules()
            _mminions[saltenv] = m
        return _mminions[saltenv]


def mmodule(saltenv, fun, *args, **kwargs):
    '''
    Loads minion modules from an environment so that they can be used in
    pillars for that environment

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.mmodule base test.ping
    '''
    mminion = _MMinion(saltenv)
    return mminion.functions[fun](*args, **kwargs)